Dataset columns (value ranges over the full dataset):

org_text     string    lengths 761 to 968k
texts        sequence
scores       sequence
num_lines    int64     1 to 25.7k
avg_score    float64   0 to 0.31
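The relationship between the raw and derived columns is not stated explicitly, but the example rows below are consistent with the following short Python sketch (an assumption, not an official loader): texts holds org_text split into lines, num_lines is the number of those lines, and avg_score is the mean of the per-line scores.

# Minimal sketch (assumed from the example rows below) of how the derived
# columns appear to relate to the raw ones.
def derive_row_fields(org_text, scores):
    """Recompute the derived columns of one row from its raw fields."""
    texts = org_text.splitlines(keepends=True)   # one entry per source line
    num_lines = len(texts)
    avg_score = sum(scores) / len(scores) if scores else 0.0
    return {"texts": texts, "num_lines": num_lines, "avg_score": avg_score}

# For example, the 38-line Progressbar row below has per-line scores summing to
# about 0.0435, giving avg_score of roughly 0.001145, which matches the stored value.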
"""Support for Lutron switches.""" import logging from homeassistant.components.switch import SwitchDevice from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Lutron switches.""" devs = [] for (area_name, device) in hass.data[LUTRON_DEVICES]['switch']: dev = LutronSwitch(area_name, device, hass.data[LUTRON_CONTROLLER]) devs.append(dev) add_entities(devs, True) class LutronSwitch(LutronDevice, SwitchDevice): """Representation of a Lutron Switch.""" def turn_on(self, **kwargs): """Turn the switch on.""" self._lutron_device.level = 100 def turn_off(self, **kwargs): """Turn the switch off.""" self._lutron_device.level = 0 @property def device_state_attributes(self): """Return the state attributes.""" attr = {} attr['lutron_integration_id'] = self._lutron_device.id return attr @property def is_on(self): """Return true if device is on.""" return self._lutron_device.last_level() > 0
texts:
"""Support for Lutron switches."""
import logging

from homeassistant.components.switch import SwitchDevice

from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice

_LOGGER = logging.getLogger(__name__)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Lutron switches."""
    devs = []
    for (area_name, device) in hass.data[LUTRON_DEVICES]['switch']:
        dev = LutronSwitch(area_name, device, hass.data[LUTRON_CONTROLLER])
        devs.append(dev)

    add_entities(devs, True)


class LutronSwitch(LutronDevice, SwitchDevice):
    """Representation of a Lutron Switch."""

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self._lutron_device.level = 100

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self._lutron_device.level = 0

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attr = {}
        attr['lutron_integration_id'] = self._lutron_device.id
        return attr

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._lutron_device.last_level() > 0

[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 42
avg_score: 0
org_text / texts:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 damian <damian@damian-laptop>
#
# Distributed under terms of the MIT license.


import boto3
import datetime
import time
import logging
import argparse

logging.basicConfig(level=logging.INFO)

def time_interval_to_timedelta(time_interval):
    time_value = time_interval[0:-1]
    time_unit = time_interval[-1]

    if time_unit == "m":
        return datetime.timedelta(minutes=int(time_value))
    if time_unit == "h":
        return datetime.timedelta(hours=int(time_value))
    if time_unit == "d":
        return datetime.timedelta(days=int(time_value))

def datetime_from_utc_to_local(utc_datetime):
    now_timestamp = time.time()
    offset = datetime.datetime.fromtimestamp(now_timestamp) - datetime.datetime.utcfromtimestamp(now_timestamp)
    return utc_datetime + offset

APPNAMES = ['homedelivery', "lambda_api", "homedelivery_all", "spaceadapter", "baskethandler", "orderadapter"]

def get_log_groups_of_app(appname, env):

    if not appname in APPNAMES:
        logging.error(f"undefined appname selected: '{appname}'. Only those appnames are defined: {APPNAMES}")
        return None

    if appname == "homedelivery":
        return [
            f"clg-euw1-{env}-storepick-os-bp-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-s3_processing",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-bq-lambda-trigger",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-ecs-task-trigger",
        ]
    elif appname == "lambda_api":
        return [
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchCancel-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchCreate-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchDocGet-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchLatest-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPatchItems-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPostItems-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPostLocations-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchStartProc-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchStatusGet-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-customer_collect_get_001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-customer_collect_put_001",
        ]
    elif appname == "homedelivery_all":
        return [
            f"clg-euw1-{env}-storepick-os-bp-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-s3_processing",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-bq-lambda-trigger",
            f"/aws/lambda/lmb-euw1-{env}-digital-atp-ecs-task-trigger",
            #f"clg-euw1-{env}-storepick-mock-servers-mocks-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchCancel-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchCreate-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchDocGet-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchLatest-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPatchItems-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPostItems-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchPostLocations-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchStartProc-001",
            f"/aws/lambda/lmb-euw1-{env}-digital-strpck-batchStatusGet-001",
        ]
    elif appname == "spaceadapter":
        return [
            f"spaceadapter-{env}"
        ]
    elif appname == "orderadapter":
        return [
            f"order-adapter-task-{env}",
            f"/aws/lambda/OrderAdapterLambda-{env.upper()}"
        ]
    elif appname == "baskethandler":
        return [
            f"basket-handler-task-{env}"
        ]
    else:
        logging.error(f"Not found log groups for {appname} in {env}")
        return None

def is_recent_event_achieved(recent_log_event, log_event):

    if recent_log_event == None:
        return True

    log_fields = {field['field'] : field['value'] for field in log_event}
    recent_log_fields = {field['field'] : field['value'] for field in recent_log_event}

    for field in log_fields.keys():
        if log_fields.get(field) != recent_log_fields.get(field):
            return False

    return True


def get_logs(env, startTime, endTime ,query, limit, appname=None, log_groups=None):
    client = boto3.client('logs')

    if appname:
        log_groups = get_log_groups_of_app(appname, env)

    if not log_groups:
        logging.error("log_groups list is empty")
        return

    MAX_SINGLE_QUERY_LIMIT=10000
    results = { 'results' : [] }
    recent_timestamp = None
    recent_log_event = None

    filename = "/tmp/default_aws_insights_logs"
    if appname:
        filename = "/tmp/appname"

    output_file = open(filename, "w+")

    while len(results['results']) in (0, MAX_SINGLE_QUERY_LIMIT):
        if recent_timestamp:
            startTime = datetime.datetime.strptime(str(recent_timestamp), "%Y-%m-%d %H:%M:%S.%f")
            startTime = datetime_from_utc_to_local(startTime)

        #print(f"recent_timestamp: {recent_timestamp}")
        #print(f"startTime: {startTime.timestamp()}")
        #print(f"endTime: {endTime.timestamp()}")

        start = response = client.start_query(
            logGroupNames = log_groups,
            startTime = int(startTime.timestamp()),
            endTime = int(endTime.timestamp()),
            queryString = query,
            limit = limit
        )

        status = 'Init'
        while status not in ('Complete', 'Failed','Cancelled', 'Timeout'):
            logging.info("waiting for query results")
            time.sleep(10)
            results = client.get_query_results(queryId=start['queryId'])
            status = results['status']


        should_print_log_event = False
        for log_event in results['results']:
            log_fields = {field['field'] : field['value'] for field in log_event}

            line = ""
            for field in log_fields.keys():
                if field != "@ptr":
                    line += f"{log_fields[field]} "
            line = line.rstrip()

            if not should_print_log_event:
                should_print_log_event = is_recent_event_achieved(recent_log_event, log_event)
            else:
                print(line)
                output_file.write(line)

            recent_timestamp = log_fields.get('@timestamp')

        #if len(results['results']) > 0:
        recent_log_event = results['results'][-1]

    output_file.close()

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='test env name. Default: dev', default="dev")
    parser.add_argument('--limit', help='limit of messages', type=int, default=10000)

    parser.add_argument('--timedelta', help='delta time since now ex. 120m, 3h, 2d. Default: 60m', default="60m")

    parser.add_argument('--start', help='start time in the format of: YYYY-MM-DD HH:MM:SS')
    parser.add_argument('--end', help='end in the format of: YYYY-MM-DD HH:MM:SS')

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--appname', help=f'name of the app which logs should be analysed. Defined apps: {APPNAMES}')
    group.add_argument('--log_groups', help='list of the log groups to analyse (up to 20)', nargs="+")

    parser.add_argument('--filter', help='regex filter for the default query', default="")

    default_query = f"fields @timestamp, @message | filter @message like // | sort @timestamp"
    parser.add_argument('--query', help=f'insights query. Default: {default_query}', default=default_query)
    args = parser.parse_args()

    if args.query == default_query:
        args.query = f"fields @timestamp, @message | filter @message like /{args.filter}/ | sort @timestamp"

    startTime = None
    endTime = None

    if args.start and not args.end:
        startTime = datetime.datetime.strptime(args.start, "%Y-%m-%d %H:%M:%S")
        endTime = datetime.datetime.now()
    if args.start and args.end:
        startTime = datetime.datetime.strptime(args.start, "%Y-%m-%d %H:%M:%S")
        endTime = datetime.datetime.strptime(args.end, "%Y-%m-%d %H:%M:%S")
    elif args.timedelta:
        endTime = datetime.datetime.now()
        startTime = endTime - time_interval_to_timedelta(args.timedelta)
    else:
        logging.error("ERROR => Neither start/end pair nor timedelta is defined")
        parser.print_help()

    get_logs(args.env, startTime, endTime, args.query, args.limit, args.appname, args.log_groups)

[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0.008928571428571428, 0, 0, 0.018018018018018018, 0, 0.024390243902439025, 0.25, 0.03125, 0.009009009009009009, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0.012345679012345678, 0.011764705882352941, 0.012345679012345678, 0.012345679012345678, 0.011494252873563218, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0.012195121951219513, 0.012345679012345678, 0.011764705882352941, 0.012345679012345678, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01694915254237288, 0, 0.030303030303030304, 0, 0, 0.013513513513513514, 0.022727272727272728, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0.09090909090909091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0.017857142857142856, 0.018518518518518517, 0.02, 0, 0, 0.045454545454545456, 0.03571428571428571, 0.038461538461538464, 0.05405405405405406, 0.06666666666666667, 0, 0, 0, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0.06451612903225806, 0, 0, 0.011904761904761904, 0.011627906976744186, 0, 0.008771929824561403, 0, 0.010869565217391304, 0.012048192771084338, 0, 0, 0.008547008547008548, 0.009708737864077669, 0, 0.01098901098901099, 0, 0.020833333333333332, 0.009259259259259259, 0, 0, 0, 0.01818181818181818, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0.01020408163265306, 1 ]
num_lines: 222
avg_score: 0.011558
org_text / texts:
import sys


class Progressbar(object):
    def __init__(self, finalcount, block_char='.'):
        self.finalcount = finalcount
        self.blockcount = 0
        self.block = block_char
        self.f = sys.stdout
        if not self.finalcount: return
        self.f.write('\n------------------ % Progress -------------------1\n')

    def progress(self, count):
        count = min(count, self.finalcount)
        if self.finalcount:
            percentcomplete = int(round(100.0 * count / self.finalcount))
            if percentcomplete < 1: percentcomplete = 1
        else:
            percentcomplete = 100
        blockcount = int(percentcomplete // 2)
        if blockcount <= self.blockcount:
            return
        for i in range(self.blockcount, blockcount):
            self.f.write(self.block)
            self.f.flush()
        self.blockcount = blockcount
        if percentcomplete == 100:
            self.f.write("\n")


if __name__ == "__main__":
    from time import sleep

    pb = Progressbar(100, '#')

    for i in range(0, 101):
        pb.progress(i)
        sleep(0.1)

[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 38
avg_score: 0.001145
org_text / texts:
#!/usr/bin/python2.6
"""
Generates graph for attendance and pre/post work.
Processes the output of:

.mode csv
.output attend.csv
select year, month, sum(type='attendance'), sum(type='prework'), sum(type='postwork') from attend group by year,month order by year, month ;

"""

from pychart import *
import csv

NUM_YEARS = 5

theme.use_color=True
theme.default_font_size=16
theme.default_line_width=2
theme.reinitialize()

csv_reader = csv.reader(file('kansai.csv'))

data = []
halfyear = [] # half-year moving average.
halfyear2 = []
halfyear3 = []
i = 1
for row in csv_reader:
    halfyear.append(int(row[2]))
    if len(halfyear) > 12:
        halfyear = halfyear[1:]

    data.append((i,
                 int(row[2]),
                 sum(halfyear) / len(halfyear),
                 ))
    i = i + 1

xaxis = axis.X(tic_interval = 12, label="Year Month",
               format=lambda x:
               "%i-%i" % ((x-1)/12+2007, (x-1)%12+1))
yaxis = axis.Y(tic_interval = 10, label="Attendees")
chart_object.set_defaults(area.T, size = (480, 300))
ar = area.T(x_axis=xaxis, y_axis=yaxis, x_range=(1,12*NUM_YEARS),
            y_range=(0,None), legend = legend.T(loc=(30,240)))
plot1 = line_plot.T(label="n. of attendees", data=data, ycol=1)
plot2 = line_plot.T(label="n. of attendees(1 yr avg)", data=data, ycol=2)

#ar.add_plot(plot, plot2, plot3, plot4, plot5, plot6)
ar.add_plot(plot1, plot2)
can = canvas.init("kansai.png")
ar.draw(can)

[ 0, 0, 0, 0, 0, 0, 0, 0.0070921985815602835, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616, 0.037037037037037035, 0.037037037037037035, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0.018518518518518517, 0.03773584905660377, 0.03773584905660377, 0.015151515151515152, 0.06349206349206349, 0, 0, 0, 0.018518518518518517, 0, 0, 0 ]
num_lines: 53
avg_score: 0.007185
org_text / texts:
from __future__ import absolute_import

from datetime import timedelta
from requests import RequestException

from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone

from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount import providers
from allauth.socialaccount.helpers import (
    complete_social_login,
    render_authentication_error,
)
from allauth.socialaccount.models import SocialLogin, SocialToken
from allauth.socialaccount.providers.base import ProviderException
from allauth.socialaccount.providers.oauth2.client import (
    OAuth2Client,
    OAuth2Error,
)
from allauth.utils import build_absolute_uri, get_request_param

from ..base import AuthAction, AuthError


class OAuth2Adapter(object):
    expires_in_key = 'expires_in'
    supports_state = True
    redirect_uri_protocol = None
    access_token_method = 'POST'
    login_cancelled_error = 'access_denied'
    scope_delimiter = ' '
    basic_auth = False
    headers = None

    def __init__(self, request):
        self.request = request

    def get_provider(self):
        return providers.registry.by_id(self.provider_id, self.request)

    def complete_login(self, request, app, access_token, **kwargs):
        """
        Returns a SocialLogin instance
        """
        raise NotImplementedError

    def get_callback_url(self, request, app):
        callback_url = reverse(self.provider_id + "_callback")
        protocol = self.redirect_uri_protocol
        return build_absolute_uri(request, callback_url, protocol)

    def parse_token(self, data):
        token = SocialToken(token=data['access_token'])
        token.token_secret = data.get('refresh_token', '')
        expires_in = data.get(self.expires_in_key, None)
        if expires_in:
            token.expires_at = timezone.now() + timedelta(
                seconds=int(expires_in))
        return token


class OAuth2View(object):
    @classmethod
    def adapter_view(cls, adapter):
        def view(request, *args, **kwargs):
            self = cls()
            self.request = request
            self.adapter = adapter(request)
            try:
                return self.dispatch(request, *args, **kwargs)
            except ImmediateHttpResponse as e:
                return e.response
        return view

    def get_client(self, request, app):
        callback_url = self.adapter.get_callback_url(request, app)
        provider = self.adapter.get_provider()
        scope = provider.get_scope(request)
        client = OAuth2Client(self.request, app.client_id, app.secret,
                              self.adapter.access_token_method,
                              self.adapter.access_token_url,
                              callback_url,
                              scope,
                              scope_delimiter=self.adapter.scope_delimiter,
                              headers=self.adapter.headers,
                              basic_auth=self.adapter.basic_auth)
        return client


class OAuth2LoginView(OAuth2View):
    def dispatch(self, request, *args, **kwargs):
        provider = self.adapter.get_provider()
        app = provider.get_app(self.request)
        client = self.get_client(request, app)
        action = request.GET.get('action', AuthAction.AUTHENTICATE)
        auth_url = self.adapter.authorize_url
        auth_params = provider.get_auth_params(request, action)
        client.state = SocialLogin.stash_state(request)
        try:
            return HttpResponseRedirect(client.get_redirect_url(
                auth_url, auth_params))
        except OAuth2Error as e:
            return render_authentication_error(
                request,
                provider.id,
                exception=e)


class OAuth2CallbackView(OAuth2View):
    def dispatch(self, request, *args, **kwargs):
        if 'error' in request.GET or 'code' not in request.GET:
            # Distinguish cancel from error
            auth_error = request.GET.get('error', None)
            if auth_error == self.adapter.login_cancelled_error:
                error = AuthError.CANCELLED
            else:
                error = AuthError.UNKNOWN
            return render_authentication_error(
                request,
                self.adapter.provider_id,
                error=error)
        app = self.adapter.get_provider().get_app(self.request)
        client = self.get_client(request, app)
        try:
            access_token = client.get_access_token(request.GET['code'])
            token = self.adapter.parse_token(access_token)
            token.app = app
            login = self.adapter.complete_login(request,
                                                app,
                                                token,
                                                response=access_token)
            login.token = token
            if self.adapter.supports_state:
                login.state = SocialLogin \
                    .verify_and_unstash_state(
                        request,
                        get_request_param(request, 'state'))
            else:
                login.state = SocialLogin.unstash_state(request)
            return complete_social_login(request, login)
        except (PermissionDenied,
                OAuth2Error,
                RequestException,
                ProviderException) as e:
            return render_authentication_error(
                request,
                self.adapter.provider_id,
                exception=e)

[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 151
avg_score: 0
org_text / texts:
import os
import plistlib
import shutil
from pkg_resources import resource_filename

import py2app.bundletemplate
from py2app.util import makedirs, mergecopy, mergetree, skipscm, make_exec

def create_pluginbundle(destdir, name, extension='.plugin', module=py2app.bundletemplate,
        platform='MacOS', copy=mergecopy, mergetree=mergetree,
        condition=skipscm, plist={}):
    kw = module.plist_template.infoPlistDict(
        plist.get('CFBundleExecutable', name), plist)
    plugin = os.path.join(destdir, kw['CFBundleName'] + extension)
    contents = os.path.join(plugin, 'Contents')
    resources = os.path.join(contents, 'Resources')
    platdir = os.path.join(contents, platform)
    dirs = [contents, resources, platdir]
    plist = plistlib.Plist()
    plist.update(kw)
    plistPath = os.path.join(contents, 'Info.plist')
    if os.path.exists(plistPath):
        if plist != plistlib.Plist.fromFile(plistPath):
            for d in dirs:
                shutil.rmtree(d, ignore_errors=True)
    for d in dirs:
        makedirs(d)
    plist.write(plistPath)
    srcmain = module.setup.main()
    destmain = os.path.join(platdir, kw['CFBundleExecutable'])
    open(os.path.join(contents, 'PkgInfo'), 'w').write(
        kw['CFBundlePackageType'] + kw['CFBundleSignature']
    )
    copy(srcmain, destmain)
    make_exec(destmain)
    mergetree(
        resource_filename(module.__name__, 'lib'),
        resources,
        condition=condition,
        copyfn=copy,
    )
    return plugin, plist

if __name__ == '__main__':
    import sys
    create_pluginbundle('build', sys.argv[1])

[ 0, 0, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0.015873015873015872, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0 ]
num_lines: 46
avg_score: 0.002205
org_text / texts:
#Compare MLRA Data Tool
#A. Stephens
#5/23/2014 Tool can compare 2 soil feature classes. Compare UPDATED SOILS to ORIGINAL SSURGO SOILS. It compares attributes and edited polygons. It reads attributes from the MUSYM column. For best results, the ORIGINAL soil polygon feature class should reside in a File Geodatabase. It can be in a different File Geodatabase than the UPDATED soil polygon feature class. Run this tool in an ArcMap Session.
#9/18/2014 Added the dissolve tool and acres calculation field.
#4/22/2016 Altered script to see difference in MLRA data test

import arcpy
arcpy.env.overwriteOutput = True
inFC = arcpy.GetParameterAsText(0)
#outFC = arcpy.GetParameterAsText(1)
#outFCDISSOLVE = arcpy.GetParameterAsText(2)


#Union update soils and original soils
arcpy.Union_analysis(inFC, "union_compare_data", "ALL")

#Make Feature Layer from Union soils
arcpy.MakeFeatureLayer_management("union_compare_data", "union_compare_data_lyr")

#Select Layer By Attribute NEW_SELECTION "MUSYM" <> "MUSYM_1"
arcpy.SelectLayerByAttribute_management("union_compare_data_lyr", "NEW_SELECTION", ' "MLRASYM" <> "MLRASYM_1" ')

#Copy Features
arcpy.CopyFeatures_management ("union_compare_data_lyr", "outFC")

dissolveFields = ["MLRASYM", "MLRASYM_1"]
#Dissolve Features
arcpy.Dissolve_management ("outFC", "COMPARE", dissolveFields)


#Delete Features
arcpy.Delete_management("union_compare_data")
arcpy.Delete_management("outFC")

#Add Field

arcpy.AddField_management("COMPARE", "ACRES", "DOUBLE", )

#Calculate Field

arcpy.CalculateField_management("COMPARE", "ACRES", '!Shape.area@ACRES!', "PYTHON_9.3", )


#print "Script Completed"
print ('Script Completed')

[ 0.04, 0.07142857142857142, 0.0049261083743842365, 0.015384615384615385, 0.015873015873015872, 0, 0, 0, 0, 0.02631578947368421, 0.021739130434782608, 0, 0, 0.025, 0, 0, 0.02631578947368421, 0.012048192771084338, 0, 0.015873015873015872, 0.008771929824561403, 0, 0.0625, 0.014925373134328358, 0, 0, 0.05, 0.015625, 0, 0, 0.05555555555555555, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0.05555555555555555, 0, 0.01098901098901099, 0, 0, 0.037037037037037035, 0.03571428571428571 ]
num_lines: 45
avg_score: 0.015665
org_text / texts:
'''
Finds torrents on piratebay from a VIP user(green crossbones) and
returns the torrent info in a dictionary
The dict returned has the form {torrent#: [ name, time, size, uploader, magnet_link]}
'''
from bs4 import BeautifulSoup
from PyQt4 import QtGui
import requests

def torrentSearch(show_name, season_episode):
    url = 'https://thepiratebay.org/search/' + show_name + ' ' + season_episode

    webpage = requests.get(url)
    html = webpage.text


    body_soup = BeautifulSoup(html, 'html.parser')
    links = body_soup.find_all('tr')



    torrents = {}
    name = ''
    magnet_link = ''
    torrent_index = 0


    for link in links:

        link_soup = BeautifulSoup(str(link), 'html.parser')

        #Get torrent name
        title = link_soup.find_all('div',{'class':'detName'})
        for t in title:

            trusted = link_soup.find_all('img',{'title':'VIP'})
            if trusted != []:

                name = t.text.strip()


                #information
                tagline = link_soup.find_all('font',{'class': 'detDesc'})
                for i in tagline:
                    info = i.text

                    temp = info.split(',')
                    time = temp[0]
                    size = temp[1]
                    user = temp[2]


                #magnet link
                magnets = link_soup.find_all('a',{'title':'Download this torrent using magnet'})
                for m in magnets:
                    magnet_link = str(m['href'])
                    #print(magnet_link)


                if all(x in name.lower() for x in [season_episode.lower(), show_name.lower()]):

                    torrents.update({torrent_index : [name,time,size,user,magnet_link] })
                    torrent_index += 1



    return torrents

[ 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0.038461538461538464, 0.03225806451612903, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.06896551724137931, 0.013513513513513514, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06896551724137931, 0.030927835051546393, 0, 0, 0.025, 0, 0, 0.020833333333333332, 0, 0.07777777777777778, 0, 0, 0, 0, 0.05 ]
num_lines: 67
avg_score: 0.009104
org_text / texts:
#!/usr/bin/env python2.7

# WebDAV-Kerberos - Kerberised WebDAV client library
# Copyright (c) 2013 Bob Carroll (bob.carroll@alum.rit.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

import davlib
import kerberos
import Cookie

class Krb5Error(Exception):

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return 'Kerberos authentication failed with error: %s' % (self.message, )

class Krb5DAV(davlib.DAV):

    def __init__(self, *args, **kwargs):
        self.__spn = 'http@%s' % (args[0], )
        self.__upn = kwargs.pop('principal') if 'principal' in kwargs else ''
        self.__spnego = False
        self.__cookies = Cookie.SimpleCookie()
        self.__persistauth = False
        apply(davlib.DAV.__init__, (self, ) + args, kwargs)

    def __probe_mechanisms(self):
        if not self.__spnego:
            response = davlib.DAV._request(self, 'OPTIONS', '/')
            authstr = response.getheader('www-authenticate')
            mechs = [s.strip() for s in authstr.split(',')]
            self.__spnego = 'Negotiate' in mechs
            self.close()

        if not self.__spnego:
            raise Krb5Error('Server does not support Kerberos authentication')

    def __challenge(self, gssctx, blob):
        try:
            result = kerberos.authGSSClientStep(gssctx, blob)
        except kerberos.GSSError as ex:
            raise Krb5Error('%s (%s)' % (ex[0][0], ex[1][0]))
        except kerberos.KrbError as ex:
            raise Krb5Error(ex[0])

        if result == kerberos.AUTH_GSS_COMPLETE:
            return (True, '')

        try:
            response = kerberos.authGSSClientResponse(gssctx)
        except kerberos.GSSError as ex:
            raise Krb5Error('%s (%s)' % (ex[0][0], ex[1][0]))
        except kerberos.KrbError as ex:
            raise Krb5Error(ex[0])

        return (False, response)

    def __store_cookies(self, response):
        cookiestr = response.getheader('set-cookie')

        if not cookiestr is None:
            self.__cookies.load(cookiestr)

    def __request_authenticate(self, method, url, body, extra_hdrs):
        self.__probe_mechanisms()

        try:
            result, gssctx = kerberos.authGSSClientInit(self.__spn, principal=self.__upn)
        except kerberos.GSSError as ex:
            raise Krb5Error('%s (%s)' % (ex[0][0], ex[1][0]))

        response = None
        blob = ''

        while True:
            try:
                result, blob = self.__challenge(gssctx, blob)
            except Krb5Error as ex:
                kerberos.authGSSClientClean(gssctx)
                raise ex

            if result:
                self.__upn = kerberos.authGSSClientUserName(gssctx)
                break

            self.close()
            extra_hdrs['Authorization'] = 'Negotiate %s' % (blob, )
            response = davlib.DAV._request(self, method, url, body, extra_hdrs)

        self.__store_cookies(response)
        authstr = response.getheader('www-authenticate')
        (mech, blob) = authstr.split(' ')

        persistauth = response.getheader('persistent-auth')
        self.__persistauth = persistauth == 'true'

        kerberos.authGSSClientClean(gssctx)
        return response

    def _request(self, method, url, body=None, extra_hdrs={}):
        cookies = Cookie.SimpleCookie()
        cookies.load(', '.join([self.__cookies[c].OutputString() for c in self.__cookies]))

        if 'Cookie' in extra_hdrs:
            cookies.load(extra_hdrs['Cookie'])

        if len(cookies) > 0:
            extra_hdrs['Cookie'] = ', '.join(['%s=%s' % (c, cookies[c].value) for c in cookies])

        if self.__persistauth:
            return davlib.DAV._request(self, method, url, body, extra_hdrs)
        else:
            return self.__request_authenticate(method, url, body, extra_hdrs)

    def whoami(self):
        return self.__upn

[ 0, 0, 0, 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024691358024691357, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
num_lines: 132
avg_score: 0.011982
""" Wind Turbine Company - 2013 Author: Stephan Rayner Email: stephan.rayner@gmail.com """ from HIL.controller.Base_Controller import Base_Controller class Shutdown(Base_Controller): """docstring for Shutdown""" def __init__(self): super(Shutdown, self).__init__() self.controler = "mcc" self.resetVariable = "@GV.HWB_ResetFault" self.List=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "46", "44", "48"] def read(self, what2read, return_onlyHigh=False): #Working 21/03/2013 ; 15:59 if(type(what2read) != list): what2read = list(what2read) SD_List = [] for SD in what2read: SD_List.append(self._SD_readformat(SD)) Output = self.mcc.read(SD_List) if(return_onlyHigh): keysToPull = [] for index, item in enumerate(SD_List): if(Output.items()[index][1] == "0"): keysToPull.append(Output.keys()[index]) for key in keysToPull: del Output[key] return Output def bypass(self, what2bypass): #Working 21/03/2013 ; 14:03 if(type(what2bypass) != list): what2bypass = [what2bypass] for SD in what2bypass: self.mcc.raw_write(self.mccip, self._SD_bypassformat(SD), "1") def clear_bypass(self, what2unbypass = None): if(what2unbypass == None): what2unbypass = self.List if(type(what2unbypass) != list): what2unbypass = [what2unbypass] for SD in what2unbypass: self.mcc.raw_write(self.mccip, self._SD_bypassformat(SD), "0") def reset(self): self.mcc.raw_write(self.mccip, self.resetVariable, "1") self.clear_bypass() def _SD_readformat(self, SD): #Looks like SD_02 for example tmp = int(SD) if(tmp < 10): SD = ("@GV.SD_0" + str(SD)) else: SD = ("@GV.SD_" + str(SD)) return SD def _SD_bypassformat(self, SD): #Looks like BYP_SD_02 for example tmp = int(SD) if(tmp < 10): SD = ("@GV.BYP_SD_0" + str(SD)) else: SD = ("@GV.BYP_SD_" + str(SD)) return SD class Alarm(Base_Controller): """docstring for Alarm""" def __init__(self): super(Alarm, self).__init__() self.controler = "mcc" self.resetVariable = "@GV.HWB_ResetFault" self.List=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34"] def read(self, what2read, return_onlyHigh=False): #Working 21/03/2013 ; 15:59 if(type(what2read) != list): what2read = list(what2read) ALM_List = [] for ALM in what2read: ALM_List.append(self._ALM_readformat(ALM)) Output = self.mcc.read(ALM_List) if(return_onlyHigh): keysToPull = [] for index, item in enumerate(ALM_List): if(Output.items()[index][1] == "0"): keysToPull.append(Output.keys()[index]) for key in keysToPull: del Output[key] return Output def bypass(self, what2bypass): #Working 21/03/2013 ; 14:03 if(type(what2bypass) != list): what2bypass = [what2bypass] for ALM in what2bypass: self.mcc.raw_write(self.mccip, self._ALM_bypassformat(ALM), "1") def clear_bypass(self, what2unbypass = None): if(what2unbypass == None): what2unbypass = self.List if(type(what2unbypass) != list): what2unbypass = [what2unbypass] for ALM in what2unbypass: self.mcc.raw_write(self.mccip, self._ALM_bypassformat(ALM), "0") def reset(self): self.mcc.raw_write(self.mccip, self.resetVariable, "1") self.clear_bypass() def _ALM_readformat(self, ALM): #Looks like ALM_02 for example tmp = int(ALM) if(tmp < 10): ALM = ("@GV.ALM_0" + str(ALM)) else: ALM = ("@GV.ALM_" + str(ALM)) return ALM def _ALM_bypassformat(self, ALM): #Looks like BYP_ALM_02 for example tmp = 
int(ALM) if(tmp < 10): ALM = ("@GV.BYP_ALM_0" + str(ALM)) else: ALM = ("@GV.BYP_ALM_" + str(ALM)) return ALM
[ "\"\"\"\n", "Wind Turbine Company - 2013\n", "\n", "Author: Stephan Rayner\n", "Email: stephan.rayner@gmail.com\n", "\"\"\"\n", "from HIL.controller.Base_Controller import Base_Controller\n", "\n", "class Shutdown(Base_Controller):\n", " \"\"\"docstring for Shutdown\"\"\"\n", " def __init__(self):\n", " super(Shutdown, self).__init__()\n", " self.controler = \"mcc\"\n", " self.resetVariable = \"@GV.HWB_ResetFault\"\n", "\n", " self.List=[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \n", " \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \n", " \"27\", \"28\", \"29\", \"30\", \"31\", \"32\", \"33\", \"34\", \"35\", \"36\", \"37\", \"38\", \"39\", \"40\", \n", " \"41\", \"42\", \"46\", \"44\", \"48\"]\n", "\n", "\n", " def read(self, what2read, return_onlyHigh=False):\n", " #Working 21/03/2013 ; 15:59\n", " if(type(what2read) != list):\n", " what2read = list(what2read)\n", " SD_List = []\n", " for SD in what2read:\n", " SD_List.append(self._SD_readformat(SD))\n", "\n", " Output = self.mcc.read(SD_List)\n", "\n", " if(return_onlyHigh):\n", " keysToPull = []\n", " for index, item in enumerate(SD_List):\n", " if(Output.items()[index][1] == \"0\"):\n", " keysToPull.append(Output.keys()[index])\n", " for key in keysToPull:\n", " del Output[key]\n", "\n", " return Output\n", "\n", " def bypass(self, what2bypass):\n", " #Working 21/03/2013 ; 14:03\n", " if(type(what2bypass) != list):\n", " what2bypass = [what2bypass]\n", " for SD in what2bypass:\n", " self.mcc.raw_write(self.mccip, self._SD_bypassformat(SD), \"1\")\n", "\n", " def clear_bypass(self, what2unbypass = None):\n", " if(what2unbypass == None):\n", " what2unbypass = self.List\n", " if(type(what2unbypass) != list):\n", " what2unbypass = [what2unbypass]\n", " for SD in what2unbypass:\n", " self.mcc.raw_write(self.mccip, self._SD_bypassformat(SD), \"0\") \n", "\n", " def reset(self):\n", " self.mcc.raw_write(self.mccip, self.resetVariable, \"1\")\n", " self.clear_bypass()\n", "\n", " def _SD_readformat(self, SD):\n", " #Looks like SD_02 for example\n", " tmp = int(SD)\n", " if(tmp < 10):\n", " SD = (\"@GV.SD_0\" + str(SD))\n", " else:\n", " SD = (\"@GV.SD_\" + str(SD))\n", " return SD\n", "\n", " def _SD_bypassformat(self, SD):\n", " #Looks like BYP_SD_02 for example\n", " tmp = int(SD)\n", " if(tmp < 10):\n", " SD = (\"@GV.BYP_SD_0\" + str(SD))\n", " else:\n", " SD = (\"@GV.BYP_SD_\" + str(SD))\n", " return SD\n", "\n", "\n", "class Alarm(Base_Controller):\n", " \"\"\"docstring for Alarm\"\"\"\n", " def __init__(self):\n", " super(Alarm, self).__init__()\n", " self.controler = \"mcc\"\n", " self.resetVariable = \"@GV.HWB_ResetFault\"\n", "\n", " self.List=[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \n", " \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \n", " \"27\", \"28\", \"29\", \"30\", \"31\", \"32\", \"33\", \"34\"]\n", "\n", " def read(self, what2read, return_onlyHigh=False):\n", " #Working 21/03/2013 ; 15:59\n", " if(type(what2read) != list):\n", " what2read = list(what2read)\n", "\n", " ALM_List = []\n", " for ALM in what2read:\n", " ALM_List.append(self._ALM_readformat(ALM))\n", " Output = self.mcc.read(ALM_List)\n", "\n", " if(return_onlyHigh):\n", " keysToPull = []\n", " for index, item in enumerate(ALM_List):\n", " if(Output.items()[index][1] == \"0\"):\n", " keysToPull.append(Output.keys()[index])\n", " for key in 
keysToPull:\n", " del Output[key]\n", "\n", " return Output\n", "\n", " def bypass(self, what2bypass):\n", " #Working 21/03/2013 ; 14:03\n", " if(type(what2bypass) != list):\n", " what2bypass = [what2bypass]\n", " for ALM in what2bypass:\n", " self.mcc.raw_write(self.mccip, self._ALM_bypassformat(ALM), \"1\")\n", "\n", " def clear_bypass(self, what2unbypass = None):\n", " if(what2unbypass == None):\n", " what2unbypass = self.List\n", " if(type(what2unbypass) != list):\n", " what2unbypass = [what2unbypass]\n", " for ALM in what2unbypass:\n", " self.mcc.raw_write(self.mccip, self._ALM_bypassformat(ALM), \"0\") \n", "\n", " def reset(self):\n", " self.mcc.raw_write(self.mccip, self.resetVariable, \"1\")\n", " self.clear_bypass()\n", "\n", " def _ALM_readformat(self, ALM):\n", " #Looks like ALM_02 for example\n", " tmp = int(ALM)\n", " if(tmp < 10):\n", " ALM = (\"@GV.ALM_0\" + str(ALM))\n", " else:\n", " ALM = (\"@GV.ALM_\" + str(ALM))\n", " return ALM\n", "\n", " def _ALM_bypassformat(self, ALM):\n", " #Looks like BYP_ALM_02 for example\n", " tmp = int(ALM)\n", " if(tmp < 10):\n", " ALM = (\"@GV.BYP_ALM_0\" + str(ALM))\n", " else:\n", " ALM = (\"@GV.BYP_ALM_\" + str(ALM))\n", " return ALM" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0.03225806451612903, 0.03225806451612903, 0.02631578947368421, 0, 0, 0.018518518518518517, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0.04, 0.02857142857142857, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0.03225806451612903, 0.017857142857142856, 0, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0.04, 0.02857142857142857, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0, 0, 0, 0, 0.05555555555555555 ]
146
0.004642
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin

from shuup.front.views.checkout import SinglePageCheckoutView


class SinglePageCheckoutViewWithLoginAndRegister(SinglePageCheckoutView):
    initial_phase = "checkout_method"
    phase_specs = [
        "shuup.front.checkout.checkout_method:CheckoutMethodPhase",
        "shuup.front.checkout.checkout_method:RegisterPhase",
        "shuup.front.checkout.addresses:AddressesPhase",
        "shuup.front.checkout.methods:MethodsPhase",
        "shuup.front.checkout.methods:ShippingMethodPhase",
        "shuup.front.checkout.methods:PaymentMethodPhase",
        "shuup.front.checkout.confirm:ConfirmPhase",
    ]
    empty_phase_spec = "shuup.front.checkout.empty:EmptyPhase"


urlpatterns = [
    url(r'^checkout/$', SinglePageCheckoutViewWithLoginAndRegister.as_view(), name='checkout'),
    url(r'^checkout/(?P<phase>.+)/$', SinglePageCheckoutViewWithLoginAndRegister.as_view(), name='checkout'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^sa/', include('shuup.admin.urls', namespace="shuup_admin", app_name="shuup_admin")),
    url(r'^', include('shuup.front.urls', namespace="shuup", app_name="shuup")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "# This file is part of Shuup.\n", "#\n", "# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.\n", "#\n", "# This source code is licensed under the OSL-3.0 license found in the\n", "# LICENSE file in the root directory of this source tree.\n", "from django.conf import settings\n", "from django.conf.urls import include, url\n", "from django.conf.urls.static import static\n", "from django.contrib import admin\n", "\n", "from shuup.front.views.checkout import SinglePageCheckoutView\n", "\n", "\n", "class SinglePageCheckoutViewWithLoginAndRegister(SinglePageCheckoutView):\n", " initial_phase = \"checkout_method\"\n", " phase_specs = [\n", " \"shuup.front.checkout.checkout_method:CheckoutMethodPhase\",\n", " \"shuup.front.checkout.checkout_method:RegisterPhase\",\n", " \"shuup.front.checkout.addresses:AddressesPhase\",\n", " \"shuup.front.checkout.methods:MethodsPhase\",\n", " \"shuup.front.checkout.methods:ShippingMethodPhase\",\n", " \"shuup.front.checkout.methods:PaymentMethodPhase\",\n", " \"shuup.front.checkout.confirm:ConfirmPhase\",\n", " ]\n", " empty_phase_spec = \"shuup.front.checkout.empty:EmptyPhase\"\n", "\n", "\n", "urlpatterns = [\n", " url(r'^checkout/$', SinglePageCheckoutViewWithLoginAndRegister.as_view(), name='checkout'),\n", " url(r'^checkout/(?P<phase>.+)/$', SinglePageCheckoutViewWithLoginAndRegister.as_view(), name='checkout'),\n", " url(r'^admin/', include(admin.site.urls)),\n", " url(r'^sa/', include('shuup.admin.urls', namespace=\"shuup_admin\", app_name=\"shuup_admin\")),\n", " url(r'^', include('shuup.front.urls', namespace=\"shuup\", app_name=\"shuup\")),\n", "] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0.00909090909090909, 0, 0.010416666666666666, 0.012345679012345678, 0 ]
35
0.001208
#
# This file is part of Evergreen. See the NOTICE for more information.
#

import pyuv

import evergreen
from evergreen.core.utils import Result
from evergreen.io import errno
from evergreen.io.stream import BaseStream, StreamConnection, StreamServer
from evergreen.log import log

__all__ = ['PipeServer', 'PipeClient', 'PipeConnection', 'PipeStream', 'PipeError']


PipeError = pyuv.error.PipeError


class BasePipeStream(BaseStream):
    error_cls = PipeError

    def __init__(self, handle):
        super(BasePipeStream, self).__init__()
        self._handle = handle


class PipeStream(BasePipeStream):

    def __init__(self):
        loop = evergreen.current.loop
        handle = pyuv.Pipe(loop._loop)
        super(PipeStream, self).__init__(handle)

    def open(self, fd):
        self._handle.open(fd)
        self._set_connected()


class PipeConnection(BasePipeStream, StreamConnection):
    pass


class PipeClient(BasePipeStream):

    def __init__(self):
        loop = evergreen.current.loop
        handle = pyuv.Pipe(loop._loop)
        super(PipeClient, self).__init__(handle)
        self._connect_result = Result()

    def connect(self, target):
        if self._connected:
            raise PipeError('already connected')
        with self._connect_result:
            try:
                self._handle.connect(target, self.__connect_cb)
            except PipeError:
                self.close()
                raise
            try:
                self._connect_result.get()
            except PipeError:
                self.close()
                raise
        self._set_connected()

    def __connect_cb(self, handle, error):
        if error is not None:
            self._connect_result.set_exception(PipeError(error, errno.strerror(error)))
        else:
            self._connect_result.set_value(None)


class PipeServer(StreamServer):
    connection_cls = PipeConnection
    error_cls = PipeError

    def __init__(self):
        super(PipeServer, self).__init__()
        loop = evergreen.current.loop
        self._handle = pyuv.Pipe(loop._loop)
        self._name = None

    @property
    def pipename(self):
        self._check_closed()
        return self._name

    def _bind(self, name):
        self._handle.bind(name)
        self._name = name

    def _serve(self, backlog):
        self._handle.listen(self.__listen_cb, backlog)

    def _close(self):
        self._handle.close()

    def __listen_cb(self, handle, error):
        if error is not None:
            log.debug('listen failed: %d %s', error, errno.strerror(error))
            return
        pipe_handle = pyuv.Pipe(self._handle.loop)
        try:
            self._handle.accept(pipe_handle)
        except PipeError as e:
            log.debug('accept failed: %d %s', e.args[0], e.args[1])
            pipe_handle.close()
        else:
            conn = self.connection_cls(pipe_handle)
            conn._set_accepted(self)
            self.handle_connection(conn)
[ "#\n", "# This file is part of Evergreen. See the NOTICE for more information.\n", "#\n", "\n", "import pyuv\n", "\n", "import evergreen\n", "from evergreen.core.utils import Result\n", "from evergreen.io import errno\n", "from evergreen.io.stream import BaseStream, StreamConnection, StreamServer\n", "from evergreen.log import log\n", "\n", "__all__ = ['PipeServer', 'PipeClient', 'PipeConnection', 'PipeStream', 'PipeError']\n", "\n", "\n", "PipeError = pyuv.error.PipeError\n", "\n", "\n", "class BasePipeStream(BaseStream):\n", " error_cls = PipeError\n", "\n", " def __init__(self, handle):\n", " super(BasePipeStream, self).__init__()\n", " self._handle = handle\n", "\n", "\n", "class PipeStream(BasePipeStream):\n", "\n", " def __init__(self):\n", " loop = evergreen.current.loop\n", " handle = pyuv.Pipe(loop._loop)\n", " super(PipeStream, self).__init__(handle)\n", "\n", " def open(self, fd):\n", " self._handle.open(fd)\n", " self._set_connected()\n", "\n", "\n", "class PipeConnection(BasePipeStream, StreamConnection):\n", " pass\n", "\n", "\n", "class PipeClient(BasePipeStream):\n", "\n", " def __init__(self):\n", " loop = evergreen.current.loop\n", " handle = pyuv.Pipe(loop._loop)\n", " super(PipeClient, self).__init__(handle)\n", " self._connect_result = Result()\n", "\n", " def connect(self, target):\n", " if self._connected:\n", " raise PipeError('already connected')\n", " with self._connect_result:\n", " try:\n", " self._handle.connect(target, self.__connect_cb)\n", " except PipeError:\n", " self.close()\n", " raise\n", " try:\n", " self._connect_result.get()\n", " except PipeError:\n", " self.close()\n", " raise\n", " self._set_connected()\n", "\n", " def __connect_cb(self, handle, error):\n", " if error is not None:\n", " self._connect_result.set_exception(PipeError(error, errno.strerror(error)))\n", " else:\n", " self._connect_result.set_value(None)\n", "\n", "\n", "class PipeServer(StreamServer):\n", " connection_cls = PipeConnection\n", " error_cls = PipeError\n", "\n", " def __init__(self):\n", " super(PipeServer, self).__init__()\n", " loop = evergreen.current.loop\n", " self._handle = pyuv.Pipe(loop._loop)\n", " self._name = None\n", "\n", " @property\n", " def pipename(self):\n", " self._check_closed()\n", " return self._name\n", "\n", " def _bind(self, name):\n", " self._handle.bind(name)\n", " self._name = name\n", "\n", " def _serve(self, backlog):\n", " self._handle.listen(self.__listen_cb, backlog)\n", "\n", " def _close(self):\n", " self._handle.close()\n", "\n", " def __listen_cb(self, handle, error):\n", " if error is not None:\n", " log.debug('listen failed: %d %s', error, errno.strerror(error))\n", " return\n", " pipe_handle = pyuv.Pipe(self._handle.loop)\n", " try:\n", " self._handle.accept(pipe_handle)\n", " except PipeError as e:\n", " log.debug('accept failed: %d %s', e.args[0], e.args[1])\n", " pipe_handle.close()\n", " else:\n", " conn = self.connection_cls(pipe_handle)\n", " conn._set_accepted(self)\n", " self.handle_connection(conn)\n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
113
0.009055
""":mod:`cliche.web.social.twitter` --- Twitter Support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cliche provides Twitter login/join to use our service. It widely uses Flask-OAuthlib_ as its OAuth framework. .. _Flask-OAuthlib: https://flask-oauthlib.readthedocs.org/ """ import collections import datetime import enum from flask import Blueprint, flash, redirect, request from flask import session as flask_session from flask import url_for from sqlalchemy.orm.exc import NoResultFound from werkzeug.routing import BaseConverter, ValidationError from ...credentials import TwitterCredential from ...user import User from ..db import session as sa_session from .provider import twitter __all__ = ('OAuthVendorConverter', 'Vendor', 'Version', 'login', 'oauth_app', 'oauth_authorized', 'vendors') oauth_app = Blueprint('oauth', __name__) Vendor = collections.namedtuple( 'Vendor', ['name', 'credential_table', 'oauth_version', 'oauth_client', 'key_names'] ) class Version(enum.Enum): oauth1 = 1 oauth2 = 2 vendors = [ Vendor('twitter', TwitterCredential, Version.oauth1, twitter, ('screen_name', 'user_id')) ] @oauth_app.route('/login/<oauth_vendor:vendor>/') def login(vendor): """Login.""" if 'logged_id' in flask_session: return redirect(url_for('index')) return vendor.oauth_client.authorize( callback=url_for( '.oauth_authorized', vendor=vendor, next=request.args.get('next') or request.referrer or None ) ) @oauth_app.route('/oauth-authorized/<oauth_vendor:vendor>/') def oauth_authorized(vendor): """Authorized OAuth and login or join with social account.""" if 'logged_id' in flask_session: return redirect(url_for('index')) next_url = request.args.get('next') or url_for('index') name_key, id_key = vendor.key_names resp = vendor.oauth_client.authorized_response() if resp is None: flash('You denied the request to sign in.', 'danger') return redirect(next_url) now = datetime.datetime.utcnow() user_id = resp[id_key] user_name = resp[name_key] with sa_session.begin(): try: social = sa_session.query(vendor.credential_table) \ .filter_by(identifier=user_id) \ .one() except NoResultFound: social = make_account(vendor.credential_table, user_name, user_id) sa_session.add(social) if vendor.oauth_version == Version.oauth1: social.token = resp['oauth_token'] social.token_secret = resp['oauth_token_secret'] flask_session['logged_id'] = social.user_id flask_session['logged_time'] = now flash('You were signed in as %s' % user_name, 'success') return redirect(next_url) def make_account(credential_table, name, user_id): """Make account. :param response: OAuth authorize response. :return: User record was created. :rtype: :class:`cliche.user.User` """ user = User(name=name) social_account = credential_table(user=user, identifier=user_id) return social_account class OAuthVendorConverter(BaseConverter): def __init__(self, url_map): super(OAuthVendorConverter, self).__init__(url_map) self.regex = '[^/]+' def to_python(self, value): for v in vendors: if v.name == value: return v raise ValidationError() def to_url(self, value): if type(value) == Vendor: return value.name else: return value
[ "\"\"\":mod:`cliche.web.social.twitter` --- Twitter Support\n", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", "\n", "Cliche provides Twitter login/join to use our service.\n", "It widely uses Flask-OAuthlib_ as its OAuth framework.\n", "\n", ".. _Flask-OAuthlib: https://flask-oauthlib.readthedocs.org/\n", "\n", "\"\"\"\n", "import collections\n", "import datetime\n", "import enum\n", "\n", "from flask import Blueprint, flash, redirect, request\n", "from flask import session as flask_session\n", "from flask import url_for\n", "from sqlalchemy.orm.exc import NoResultFound\n", "from werkzeug.routing import BaseConverter, ValidationError\n", "\n", "from ...credentials import TwitterCredential\n", "from ...user import User\n", "from ..db import session as sa_session\n", "from .provider import twitter\n", "\n", "\n", "__all__ = ('OAuthVendorConverter', 'Vendor', 'Version', 'login', 'oauth_app',\n", " 'oauth_authorized', 'vendors')\n", "\n", "\n", "oauth_app = Blueprint('oauth', __name__)\n", "\n", "Vendor = collections.namedtuple(\n", " 'Vendor',\n", " ['name', 'credential_table', 'oauth_version', 'oauth_client', 'key_names']\n", ")\n", "\n", "\n", "class Version(enum.Enum):\n", " oauth1 = 1\n", " oauth2 = 2\n", "\n", "\n", "vendors = [\n", " Vendor('twitter', TwitterCredential, Version.oauth1, twitter,\n", " ('screen_name', 'user_id'))\n", "]\n", "\n", "\n", "@oauth_app.route('/login/<oauth_vendor:vendor>/')\n", "def login(vendor):\n", " \"\"\"Login.\"\"\"\n", " if 'logged_id' in flask_session:\n", " return redirect(url_for('index'))\n", "\n", " return vendor.oauth_client.authorize(\n", " callback=url_for(\n", " '.oauth_authorized',\n", " vendor=vendor,\n", " next=request.args.get('next') or request.referrer or None\n", " )\n", " )\n", "\n", "\n", "@oauth_app.route('/oauth-authorized/<oauth_vendor:vendor>/')\n", "def oauth_authorized(vendor):\n", " \"\"\"Authorized OAuth and login or join with social account.\"\"\"\n", " if 'logged_id' in flask_session:\n", " return redirect(url_for('index'))\n", "\n", " next_url = request.args.get('next') or url_for('index')\n", "\n", " name_key, id_key = vendor.key_names\n", "\n", " resp = vendor.oauth_client.authorized_response()\n", " if resp is None:\n", " flash('You denied the request to sign in.', 'danger')\n", " return redirect(next_url)\n", "\n", " now = datetime.datetime.utcnow()\n", "\n", " user_id = resp[id_key]\n", " user_name = resp[name_key]\n", "\n", " with sa_session.begin():\n", " try:\n", " social = sa_session.query(vendor.credential_table) \\\n", " .filter_by(identifier=user_id) \\\n", " .one()\n", " except NoResultFound:\n", " social = make_account(vendor.credential_table, user_name, user_id)\n", " sa_session.add(social)\n", "\n", " if vendor.oauth_version == Version.oauth1:\n", " social.token = resp['oauth_token']\n", " social.token_secret = resp['oauth_token_secret']\n", "\n", " flask_session['logged_id'] = social.user_id\n", " flask_session['logged_time'] = now\n", "\n", " flash('You were signed in as %s' % user_name, 'success')\n", " return redirect(next_url)\n", "\n", "\n", "def make_account(credential_table, name, user_id):\n", " \"\"\"Make account.\n", "\n", " :param response: OAuth authorize response.\n", " :return: User record was created.\n", " :rtype: :class:`cliche.user.User`\n", "\n", " \"\"\"\n", " user = User(name=name)\n", " social_account = credential_table(user=user, identifier=user_id)\n", " return social_account\n", "\n", "\n", "class OAuthVendorConverter(BaseConverter):\n", "\n", " def __init__(self, 
url_map):\n", " super(OAuthVendorConverter, self).__init__(url_map)\n", " self.regex = '[^/]+'\n", "\n", " def to_python(self, value):\n", " for v in vendors:\n", " if v.name == value:\n", " return v\n", " raise ValidationError()\n", "\n", " def to_url(self, value):\n", " if type(value) == Vendor:\n", " return value.name\n", " else:\n", " return value\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
133
0
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from neutronclient.neutron.v2_0 import CreateCommand
from neutronclient.neutron.v2_0 import DeleteCommand
from neutronclient.neutron.v2_0 import ListCommand
from neutronclient.neutron.v2_0 import ShowCommand


class ListNetPartition(ListCommand):
    """List netpartitions that belong to a given tenant."""
    resource = 'net_partition'
    list_columns = ['id', 'name']


class ShowNetPartition(ShowCommand):
    """Show information of a given netpartition."""

    resource = 'net_partition'


class CreateNetPartition(CreateCommand):
    """Create a netpartition for a given tenant."""

    resource = 'net_partition'

    def add_known_arguments(self, parser):
        parser.add_argument(
            'name', metavar='name',
            help='Name of netpartition to create.')

    def args2body(self, parsed_args):
        body = {'net_partition': {'name': parsed_args.name}, }
        return body


class DeleteNetPartition(DeleteCommand):
    """Delete a given netpartition."""

    resource = 'net_partition'
[ "# Copyright 2014 Alcatel-Lucent USA Inc.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n", "# not use this file except in compliance with the License. You may obtain\n", "# a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n", "# License for the specific language governing permissions and limitations\n", "# under the License.\n", "#\n", "\n", "from neutronclient.neutron.v2_0 import CreateCommand\n", "from neutronclient.neutron.v2_0 import DeleteCommand\n", "from neutronclient.neutron.v2_0 import ListCommand\n", "from neutronclient.neutron.v2_0 import ShowCommand\n", "\n", "\n", "class ListNetPartition(ListCommand):\n", " \"\"\"List netpartitions that belong to a given tenant.\"\"\"\n", " resource = 'net_partition'\n", " list_columns = ['id', 'name']\n", "\n", "\n", "class ShowNetPartition(ShowCommand):\n", " \"\"\"Show information of a given netpartition.\"\"\"\n", "\n", " resource = 'net_partition'\n", "\n", "\n", "class CreateNetPartition(CreateCommand):\n", " \"\"\"Create a netpartition for a given tenant.\"\"\"\n", "\n", " resource = 'net_partition'\n", "\n", " def add_known_arguments(self, parser):\n", " parser.add_argument(\n", " 'name', metavar='name',\n", " help='Name of netpartition to create.')\n", "\n", " def args2body(self, parsed_args):\n", " body = {'net_partition': {'name': parsed_args.name}, }\n", " return body\n", "\n", "\n", "class DeleteNetPartition(DeleteCommand):\n", " \"\"\"Delete a given netpartition.\"\"\"\n", "\n", " resource = 'net_partition'\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
52
0
# Copyright (c) 2012-2015 Netforce Co. Ltd. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from netforce.model import Model, fields, get_model from netforce.utils import get_data_path, roundup from netforce.database import get_active_db import time import uuid from netforce.access import get_active_company, set_active_user, get_active_user from . import utils from decimal import * class SaleQuot(Model): _name = "sale.quot" _string = "Quotation" _audit_log = True _name_field = "number" _key = ["number"] _multi_company = True _fields = { "number": fields.Char("Number", required=True, search=True), "ref": fields.Char("Ref", search=True), "contact_id": fields.Many2One("contact", "Contact", required=True, search=True), "date": fields.Date("Date", required=True, search=True), "exp_date": fields.Date("Valid Until"), "state": fields.Selection([("draft", "Draft"), ("waiting_approval", "Awaiting Approval"), ("approved", "Approved"), ("won", "Won"), ("lost", "Lost"), ("revised", "Revised")], "Status", function="get_state", store=True), "lines": fields.One2Many("sale.quot.line", "quot_id", "Lines"), "amount_subtotal": fields.Decimal("Subtotal", function="get_amount", function_multi=True, store=True), "amount_tax": fields.Decimal("Tax Amount", function="get_amount", function_multi=True, store=True), "amount_total": fields.Decimal("Total", function="get_amount", function_multi=True, store=True), "amount_total_words": fields.Char("Total Words", function="get_amount_total_words"), "qty_total": fields.Decimal("Total", function="get_qty_total"), "currency_id": fields.Many2One("currency", "Currency", required=True), "opport_id": fields.Many2One("sale.opportunity", "Opportunity", search=True), "user_id": fields.Many2One("base.user", "Owner", search=True), "tax_type": fields.Selection([["tax_ex", "Tax Exclusive"], ["tax_in", "Tax Inclusive"], ["no_tax", "No Tax"]], "Tax Type", required=True), "sales": fields.One2Many("sale.order", "quot_id", "Sales Orders"), "payment_terms": fields.Text("Payment Terms"), "other_info": fields.Text("Other Information"), "comments": fields.One2Many("message", "related_id", "Comments"), "activities": fields.One2Many("activity", "related_id", "Activities"), "documents": fields.One2Many("document", "related_id", "Documents"), "uuid": fields.Char("UUID"), "price_list_id": fields.Many2One("price.list", "Price List"), "emails": fields.One2Many("email.message", "related_id", "Emails"), "company_id": fields.Many2One("company", "Company"), "related_id": fields.Reference([["issue", "Issue"]], 
"Related To"), "ship_term_id": fields.Many2One("ship.term", "Shipping Terms"), "sequence_id": fields.Many2One("sequence", "Number Sequence"), "job_template_id": fields.Many2One("job.template", "Service Order Template"), "lost_sale_code_id": fields.Many2One("reason.code", "Lost Sale Reason Code", condition=[["type", "=", "lost_sale"]]), "agg_amount_total": fields.Decimal("Total Amount", agg_function=["sum", "amount_total"]), "agg_amount_subtotal": fields.Decimal("Total Amount w/o Tax", agg_function=["sum", "amount_subtotal"]), "year": fields.Char("Year", sql_function=["year", "date"]), "quarter": fields.Char("Quarter", sql_function=["quarter", "date"]), "month": fields.Char("Month", sql_function=["month", "date"]), "week": fields.Char("Week", sql_function=["week", "date"]), "est_costs": fields.One2Many("quot.cost","quot_id","Costs"), "est_cost_amount": fields.Float("Estimated Cost Amount", function="get_est_profit", function_multi=True), "est_profit_amount": fields.Float("Estimated Profit Amount", function="get_est_profit", function_multi=True), "est_margin_percent": fields.Float("Estimated Margin %", function="get_est_profit", function_multi=True), "currency_rates": fields.One2Many("custom.currency.rate","related_id","Currency Rates"), } def _get_number(self, context={}): seq_id = get_model("sequence").find_sequence(type="sale_quot") if not seq_id: return None while 1: num = get_model("sequence").get_next_number(seq_id, context=context) if not num: return None user_id = get_active_user() set_active_user(1) res = self.search([["number", "=", num]]) set_active_user(user_id) if not res: return num get_model("sequence").increment_number(seq_id, context=context) def _get_currency(self, context={}): settings = get_model("settings").browse(1) return settings.currency_id.id def _get_currency_rates(self,context={}): settings = get_model("settings").browse(1) lines=[] date = time.strftime("%Y-%m-%d") val = { "currency_id": settings.currency_id.id, "rate": settings.currency_id.get_rate(date,"sell") or 1 } if context.get('action_name'): # default for new quotation create via quotation form lines.append(val) else: # When users create or copy quotation from other modules or methods, one2many field cannot be appended without action key # bacause it must be created in the database along with quotation itself. # If action key such as 'create', 'delete' is missing, the default line will not be created. # So, the action_key 'create' has to be appended into the list also. 
lines.append(("create",val)) return lines _defaults = { "state": "draft", "date": lambda *a: time.strftime("%Y-%m-%d"), "number": _get_number, "currency_id": _get_currency, "tax_type": "tax_ex", "user_id": lambda self, context: get_active_user(), "uuid": lambda *a: str(uuid.uuid4()), "company_id": lambda *a: get_active_company(), "currency_rates": _get_currency_rates, } _constraints = ["check_fields"] _order = "date desc" def check_fields(self, ids, context={}): for obj in self.browse(ids): if obj.state in ("waiting_approval", "approved"): if not obj.lines: raise Exception("No lines in quotation") def create(self, vals, **kw): id = super().create(vals, **kw) self.function_store([id]) return id def write(self, ids, vals, **kw): opport_ids = [] for obj in self.browse(ids): if obj.opport_id: opport_ids.append(obj.opport_id.id) super().write(ids, vals, **kw) if opport_ids: get_model("sale.opportunity").function_store(opport_ids) self.function_store(ids) def function_store(self, ids, field_names=None, context={}): super().function_store(ids, field_names, context) opport_ids = [] for obj in self.browse(ids): if obj.opport_id: opport_ids.append(obj.opport_id.id) if opport_ids: get_model("sale.opportunity").function_store(opport_ids) def get_amount(self, ids, context={}): res = {} for obj in self.browse(ids): vals = {} subtotal = 0 tax = 0 for line in obj.lines: if line.is_hidden: continue if line.tax_id: line_tax = get_model("account.tax.rate").compute_tax( line.tax_id.id, line.amount, tax_type=obj.tax_type) else: line_tax = 0 tax += line_tax if obj.tax_type == "tax_in": subtotal += (line.amount or 0) - line_tax else: subtotal += line.amount or 0 vals["amount_subtotal"] = subtotal vals["amount_tax"] = tax vals["amount_total"] = subtotal + tax res[obj.id] = vals return res def get_qty_total(self, ids, context={}): res = {} for obj in self.browse(ids): qty = sum([line.qty for line in obj.lines]) res[obj.id] = qty or 0 return res def submit_for_approval(self, ids, context={}): for obj in self.browse(ids): if obj.state != "draft": raise Exception("Invalid state") obj.write({"state": "waiting_approval"}) self.trigger(ids, "submit_for_approval") def approve(self, ids, context={}): for obj in self.browse(ids): if obj.state not in ("draft", "waiting_approval"): raise Exception("Invalid state") obj.write({"state": "approved"}) def update_amounts(self, context): print("update_amounts") data = context["data"] data["amount_subtotal"] = 0 data["amount_tax"] = 0 tax_type = data["tax_type"] #===============>>> def _get_relative_currency_rate(currency_id): rate=None for r in data['currency_rates']: if r.get('currency_id')==currency_id: rate=r.get('rate') or 0 break if rate is None: print(data['date'],currency_id,data['currency_id']) rate_from=get_model("currency").get_rate([currency_id],data['date']) or Decimal(1) rate_to=get_model("currency").get_rate([data['currency_id']],data['date']) or Decimal(1) rate=rate_from/rate_to return rate item_costs={} for cost in data['est_costs']: if not cost: continue amt=cost['amount'] or 0 if cost.get('currency_id'): print(cost.get("currency_id.id"),cost.get("currency_id")) rate=_get_relative_currency_rate(cost.get("currency_id")) amt=amt*rate comps=[] if cost.get("sequence"): for comp in cost['sequence'].split("."): comps.append(comp) path=".".join(comps) k=(data['id'],path) item_costs.setdefault(k,0) item_costs[k]+=amt #<<<=============== for line in data["lines"]: if not line: continue amt = (line.get("qty") or 0) * (line.get("unit_price") or 0) amt = Decimal(roundup(amt)) 
if line.get("discount"): disc = Decimal(amt) * Decimal(line["discount"]) / Decimal(100) amt -= disc if line.get("discount_amount"): amt -= line["discount_amount"] line["amount"] = amt #===============>>> k=None if id in data: k=(data['id'],line.get("sequence",0)) else: k=(line.get("sequence",0)) cost=item_costs.get(k,0) profit=amt-cost margin=profit*100/amt if amt else 0 line["est_cost_amount"]=cost line["est_profit_amount"]=profit line["est_margin_percent"]=margin #<<<=============== hide_parents=[] for line in data["lines"]: if not line: continue if line.get("sequence") and line.get("hide_sub"): hide_parents.append(line["sequence"]) is_hidden={} hide_totals={} for line in data["lines"]: if not line: continue if not line.get("sequence"): continue parent_seq=None for seq in hide_parents: if line["sequence"].startswith(seq+"."): parent_seq=seq break if parent_seq: is_hidden[line["sequence"]]=True hide_totals.setdefault(parent_seq,0) hide_totals[parent_seq]+=line["amount"] for line in data["lines"]: if not line: continue if line.get("sequence") and line.get("hide_sub"): line["amount"]=hide_totals.get(line["sequence"],0) if line["qty"]: line["unit_price"]=line["amount"]/line["qty"] for line in data["lines"]: if is_hidden.get(line.get("sequence")): continue tax_id = line.get("tax_id") if tax_id: tax = get_model("account.tax.rate").compute_tax(tax_id, line["amount"], tax_type=tax_type) data["amount_tax"] += tax else: tax = 0 if tax_type == "tax_in": data["amount_subtotal"] += Decimal(line["amount"] - tax) else: data["amount_subtotal"] += Decimal(line["amount"]) data["amount_total"] = data["amount_subtotal"] + data["amount_tax"] return data def onchange_product(self, context): data = context["data"] contact_id = data.get("contact_id") if contact_id: contact = get_model("contact").browse(contact_id) else: contact = None path = context["path"] line = get_data_path(data, path, parent=True) prod_id = line.get("product_id") if not prod_id: return {} prod = get_model("product").browse(prod_id) line["description"] = prod.description line["est_margin_percent_input"] = prod.gross_profit line["qty"] = 1 if prod.uom_id is not None: line["uom_id"] = prod.uom_id.id pricelist_id = data["price_list_id"] price = None if pricelist_id: price = get_model("price.list").get_price(pricelist_id, prod.id, 1) price_list = get_model("price.list").browse(pricelist_id) price_currency_id = price_list.currency_id.id if price is None: price = prod.sale_price settings = get_model("settings").browse(1) price_currency_id = settings.currency_id.id if price is not None: currency_id = data["currency_id"] price_cur = get_model("currency").convert(price, price_currency_id, currency_id) line["unit_price"] = price_cur if prod.sale_tax_id is not None: line["tax_id"] = prod.sale_tax_id.id data = self.update_amounts(context) return data def onchange_qty(self, context): data = context["data"] path = context["path"] line = get_data_path(data, path, parent=True) prod_id = line.get("product_id") if not prod_id: return {} prod = get_model("product").browse(prod_id) pricelist_id = data["price_list_id"] qty = line["qty"] if line.get("unit_price") is None: price = None if pricelist_id: price = get_model("price.list").get_price(pricelist_id, prod.id, qty) price_list = get_model("price.list").browse(pricelist_id) price_currency_id = price_list.currency_id.id if price is None: price = prod.sale_price settings = get_model("settings").browse(1) price_currency_id = settings.currency_id.id if price is not None: currency_id = data["currency_id"] 
price_cur = get_model("currency").convert(price, price_currency_id, currency_id) line["unit_price"] = price_cur data = self.update_amounts(context) return data def onchange_contact(self, context): data = context["data"] contact_id = data.get("contact_id") if not contact_id: return {} contact = get_model("contact").browse(contact_id) data["payment_terms"] = contact.payment_terms data["price_list_id"] = contact.sale_price_list_id.id if contact.currency_id: data["currency_id"] = contact.currency_id.id else: settings = get_model("settings").browse(1) data["currency_id"] = settings.currency_id.id return data def onchange_uom(self, context): data = context["data"] path = context["path"] line = get_data_path(data, path, parent=True) prod_id = line.get("product_id") if not prod_id: return {} prod = get_model("product").browse(prod_id) uom_id = line.get("uom_id") if not uom_id: return {} uom = get_model("uom").browse(uom_id) if prod.sale_price is not None: line["unit_price"] = prod.sale_price * uom.ratio / prod.uom_id.ratio data = self.update_amounts(context) return data def copy(self, ids, context): obj = self.browse(ids)[0] vals = { "ref": obj.number, "contact_id": obj.contact_id.id, "currency_id": obj.currency_id.id, "tax_type": obj.tax_type, "payment_terms": obj.payment_terms, "other_info": obj.other_info, "exp_date": obj.exp_date, "opport_id": obj.opport_id.id, "lines": [], } for line in obj.lines: line_vals = { "product_id": line.product_id.id, "description": line.description, "qty": line.qty, "uom_id": line.uom_id.id, "unit_price": line.unit_price, "discount": line.discount, "discount_amount": line.discount_amount, "tax_id": line.tax_id.id, 'amount': line.amount, 'sequence': line.sequence, } vals["lines"].append(("create", line_vals)) new_id = self.create(vals, context=context) new_obj = self.browse(new_id) return { "next": { "name": "quot", "mode": "form", "active_id": new_id, }, "flash": "Quotation %s copied from %s" % (new_obj.number, obj.number), } def revise(self, ids, context): obj = self.browse(ids)[0] res = self.copy(ids, context) obj.write({"state": "revised"}) return res def copy_to_sale_order(self,ids,context): id=ids[0] obj=self.browse(id) sale_vals={ "ref": obj.number, "quot_id": obj.id, "contact_id": obj.contact_id.id, "currency_id": obj.currency_id.id, "tax_type": obj.tax_type, "lines": [], "user_id": obj.user_id.id, "other_info": obj.other_info, "payment_terms": obj.payment_terms, "price_list_id": obj.price_list_id.id, "job_template_id": obj.job_template_id.id, "est_costs": [], "currency_rates": [], "related_id": "sale.quot,%s"%obj.id, } for line in obj.lines: if not line.qty: continue prod=line.product_id line_vals={ "sequence": line.sequence, "product_id": prod.id, "description": line.description, "qty": line.qty, "uom_id": line.uom_id and line.uom_id.id or None, "unit_price": line.unit_price if not line.is_hidden else 0, "discount": line.discount if not line.is_hidden else 0, "discount_amount": line.discount_amount if not line.is_hidden else 0, "tax_id": line.tax_id.id if not line.is_hidden else None, "location_id": prod.location_id.id if prod else None, } if prod.locations: line_vals["location_id"] = prod.locations[0].location_id.id for loc in prod.locations: if loc.stock_qty: line_vals['location_id']=loc.location_id.id break sale_vals["lines"].append(("create",line_vals)) for cost in obj.est_costs: cost_vals={ "sequence": cost.sequence, "product_id": cost.product_id.id, "description": cost.description, "supplier_id": cost.supplier_id.id, "list_price": cost.list_price, 
"purchase_price": cost.purchase_price, "purchase_duty_percent": cost.purchase_duty_percent, "purchase_ship_percent": cost.purchase_ship_percent, "landed_cost": cost.landed_cost, "qty": cost.qty, "currency_id": cost.currency_id.id, } sale_vals["est_costs"].append(("create",cost_vals)) for r in obj.currency_rates: rate_vals={ "currency_id": r.currency_id.id, "rate": r.rate, } sale_vals["currency_rates"].append(("create",rate_vals)) sale_id=get_model("sale.order").create(sale_vals,context=context) sale=get_model("sale.order").browse(sale_id) return { "next": { "name": "sale", "mode": "form", "active_id": sale_id, }, "flash": "Sale order %s created from quotation %s"%(sale.number,obj.number) } def do_won(self, ids, context={}): for obj in self.browse(ids): assert obj.state == "approved" obj.write({"state": "won"}) def do_lost(self, ids, context={}): for obj in self.browse(ids): assert obj.state == "approved" obj.write({"state": "lost"}) def do_reopen(self, ids, context={}): for obj in self.browse(ids): assert obj.state in ("won", "lost") obj.write({"state": "approved"}) def get_state(self, ids, context={}): vals = {} for obj in self.browse(ids): state = obj.state if state == "approved": found = False for sale in obj.sales: if sale.state in ("confirmed", "done"): found = True break if found: state = "won" vals[obj.id] = state return vals def view_link(self, ids, context={}): obj = self.browse(ids)[0] uuid = obj.uuid dbname = get_active_db() return { "next": { "type": "url", "url": "/view_quot?dbname=%s&uuid=%s" % (dbname, uuid), } } def get_template_quot_form(self, ids, context={}): obj = self.browse(ids)[0] has_discount=False for line in obj.lines: if line.discount: has_discount=True if has_discount: return "quot_form_disc" else: return "quot_form" def to_draft(self, ids, context={}): obj = self.browse(ids)[0] obj.write({"state": "draft"}) def get_amount_total_words(self, ids, context={}): vals = {} for obj in self.browse(ids): amount_total_words = utils.num2word(obj.amount_total) vals[obj.id] = amount_total_words return vals def onchange_sequence(self, context={}): data = context["data"] seq_id = data["sequence_id"] context['date']=data['date'] if not seq_id: return None while 1: num = get_model("sequence").get_next_number(seq_id, context=context) res = self.search([["number", "=", num]]) if not res: break get_model("sequence").increment_number(seq_id, context=context) data["number"] = num return data def onchange_cost_product(self,context): data=context["data"] path=context["path"] line=get_data_path(data,path,parent=True) prod_id=line.get("product_id") if prod_id: prod=get_model("product").browse(prod_id) line["description"]=prod.name line["list_price"]=prod.purchase_price line["purchase_price"]=prod.purchase_price line["landed_cost"]=prod.landed_cost line["qty"]=1 line["uom_id"]=prod.uom_id.id line["currency_id"]=prod.purchase_currency_id.id line["purchase_duty_percent"]=prod.purchase_duty_percent line["purchase_ship_percent"]=prod.purchase_ship_percent line["landed_cost"]=prod.landed_cost line["amount"]=line['qty']*line['landed_cost'] or 0 if prod.suppliers: line["supplier_id"]=prod.suppliers[0].supplier_id.id return data def get_est_profit(self, ids, context={}): vals = {} for obj in self.browse(ids): cost=0 for line in obj.lines: cost+=line.est_cost_amount or 0 profit = (obj.amount_subtotal or 0) - cost margin=profit*100/obj.amount_subtotal if obj.amount_subtotal else None vals[obj.id] = { "est_cost_amount": cost, "est_profit_amount": profit, "est_margin_percent": margin, } return vals 
def create_est_costs(self,ids,context={}): obj=self.browse(ids[0]) del_ids=[] for cost in obj.est_costs: if cost.product_id: del_ids.append(cost.id) get_model("quot.cost").delete(del_ids) #obj.write({"est_costs":[("delete_all",)]}) line_sequence = 1 settings = get_model("settings").browse(1) for line in obj.lines: prod=line.product_id cur_line_sequence = line_sequence landed_cost = prod.landed_cost if not prod: continue if not prod.purchase_price and prod.type != "service": continue if not prod.cost_price and prod.type == "service": continue #if not line.sequence: #continue if "bundle" == prod.type: continue # update line seqence if not line.sequence: line.write({"sequence": cur_line_sequence}) line_sequence += 1 else: line_sequence = round(Decimal(line.sequence)) + Decimal(1) # comput cost if product is service if prod.type == "service": if round(prod.cost_price,2) == round(line.unit_price,2): landed_cost = prod.cost_price else: landed_cost = prod.cost_price * line.unit_price vals={ "quot_id": obj.id, "sequence": (line.sequence if not line.is_hidden else line.parent_sequence) if line.sequence else cur_line_sequence, "product_id": prod.id, "description": prod.name, "supplier_id": prod.suppliers[0].supplier_id.id if prod.suppliers else None, "list_price": prod.purchase_price, "purchase_price": prod.purchase_price, #"landed_cost": prod.cost_price if prod.type == "service" else prod.landed_cost, "landed_cost": landed_cost, "purchase_duty_percent": prod.purchase_duty_percent, "purchase_ship_percent": prod.purchase_ship_percent, "qty": line.qty, "currency_id": prod.purchase_currency_id.id or settings.currency_id.id, } get_model("quot.cost").create(vals) def merge_quotations(self,ids,context={}): if len(ids)<2: raise Exception("Can not merge less than two quotations") contact_ids=[] currency_ids=[] tax_types=[] for obj in self.browse(ids): contact_ids.append(obj.contact_id.id) currency_ids.append(obj.currency_id.id) tax_types.append(obj.tax_type) contact_ids=list(set(contact_ids)) currency_ids=list(set(currency_ids)) tax_types=list(set(tax_types)) if len(contact_ids)>1: raise Exception("Quotation customers have to be the same") if len(currency_ids)>1: raise Exception("Quotation currencies have to be the same") if len(tax_types)>1: raise Exception("Quotation tax types have to be the same") vals = { "contact_id": contact_ids[0], "currency_id": currency_ids[0], "tax_type": tax_types[0], "lines": [], "est_costs": [], } seq=0 refs=[] for obj in sorted(self.browse(ids),key=lambda obj: obj.number): refs.append(obj.number) seq_map={} for line in obj.lines: seq+=1 seq_map[line.sequence]=seq qty=line.qty or 0 unit_price=line.unit_price or 0 amt=qty*unit_price disc=amt*(line.discount or 0)/Decimal(100) line_vals = { "sequence": seq, "product_id": line.product_id.id, "description": line.description, "qty": qty, "uom_id": line.uom_id.id, "unit_price": unit_price, "discount": disc, "amount": amt, "tax_id": line.tax_id.id, } vals["lines"].append(("create", line_vals)) for cost in obj.est_costs: cost_vals={ "sequence": seq_map.get(cost.sequence), "product_id": cost.product_id.id, "description": cost.description, "supplier_id": cost.supplier_id.id, "list_price": cost.list_price, "purchase_price": cost.purchase_price, "landed_cost": cost.landed_cost, "qty": cost.qty, "currency_id": cost.currency_id.id, } vals["est_costs"].append(("create",cost_vals)) vals['ref']=', '.join([ref for ref in refs]) new_id = self.create(vals, context=context) new_obj = self.browse(new_id) return { "next": { "name": "quot", "mode": 
"form", "active_id": new_id, }, "flash": "Quotations merged", } def onchange_est_margin(self,context={}): data=context["data"] path=context["path"] line=get_data_path(data,path,parent=True) margin=line["est_margin_percent_input"] amt=line["est_cost_amount"]/(1-margin/Decimal(100)) price=round(amt/line["qty"]) line["unit_price"]=price self.update_amounts(context) return data def get_relative_currency_rate(self,ids,currency_id): obj=self.browse(ids[0]) rate=None for r in obj.currency_rates: if r.currency_id.id==currency_id: rate=r.rate break if rate is None: rate_from=get_model("currency").get_rate([currency_id],obj.date) or Decimal(1) rate_to=obj.currency_id.get_rate(obj.date) or Decimal(1) rate=rate_from/rate_to return rate def update_cost_amount(self,context={}): data=context['data'] path=context['path'] line=get_data_path(data,path,parent=True) line['amount']=(line['qty'] or 0) *(line['landed_cost'] or 0) return data SaleQuot.register()
[ "# Copyright (c) 2012-2015 Netforce Co. Ltd.\n", "# \n", "# Permission is hereby granted, free of charge, to any person obtaining a copy\n", "# of this software and associated documentation files (the \"Software\"), to deal\n", "# in the Software without restriction, including without limitation the rights\n", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n", "# copies of the Software, and to permit persons to whom the Software is\n", "# furnished to do so, subject to the following conditions:\n", "# \n", "# The above copyright notice and this permission notice shall be included in all\n", "# copies or substantial portions of the Software.\n", "# \n", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n", "# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n", "# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n", "# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n", "# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n", "# OR OTHER DEALINGS IN THE SOFTWARE.\n", "\n", "from netforce.model import Model, fields, get_model\n", "from netforce.utils import get_data_path, roundup\n", "from netforce.database import get_active_db\n", "import time\n", "import uuid\n", "from netforce.access import get_active_company, set_active_user, get_active_user\n", "from . import utils\n", "from decimal import *\n", "\n", "class SaleQuot(Model):\n", " _name = \"sale.quot\"\n", " _string = \"Quotation\"\n", " _audit_log = True\n", " _name_field = \"number\"\n", " _key = [\"number\"]\n", " _multi_company = True\n", " _fields = {\n", " \"number\": fields.Char(\"Number\", required=True, search=True),\n", " \"ref\": fields.Char(\"Ref\", search=True),\n", " \"contact_id\": fields.Many2One(\"contact\", \"Contact\", required=True, search=True),\n", " \"date\": fields.Date(\"Date\", required=True, search=True),\n", " \"exp_date\": fields.Date(\"Valid Until\"),\n", " \"state\": fields.Selection([(\"draft\", \"Draft\"), (\"waiting_approval\", \"Awaiting Approval\"), (\"approved\", \"Approved\"), (\"won\", \"Won\"), (\"lost\", \"Lost\"), (\"revised\", \"Revised\")], \"Status\", function=\"get_state\", store=True),\n", " \"lines\": fields.One2Many(\"sale.quot.line\", \"quot_id\", \"Lines\"),\n", " \"amount_subtotal\": fields.Decimal(\"Subtotal\", function=\"get_amount\", function_multi=True, store=True),\n", " \"amount_tax\": fields.Decimal(\"Tax Amount\", function=\"get_amount\", function_multi=True, store=True),\n", " \"amount_total\": fields.Decimal(\"Total\", function=\"get_amount\", function_multi=True, store=True),\n", " \"amount_total_words\": fields.Char(\"Total Words\", function=\"get_amount_total_words\"),\n", " \"qty_total\": fields.Decimal(\"Total\", function=\"get_qty_total\"),\n", " \"currency_id\": fields.Many2One(\"currency\", \"Currency\", required=True),\n", " \"opport_id\": fields.Many2One(\"sale.opportunity\", \"Opportunity\", search=True),\n", " \"user_id\": fields.Many2One(\"base.user\", \"Owner\", search=True),\n", " \"tax_type\": fields.Selection([[\"tax_ex\", \"Tax Exclusive\"], [\"tax_in\", \"Tax Inclusive\"], [\"no_tax\", \"No Tax\"]], \"Tax Type\", required=True),\n", " \"sales\": fields.One2Many(\"sale.order\", \"quot_id\", \"Sales Orders\"),\n", " \"payment_terms\": fields.Text(\"Payment Terms\"),\n", " \"other_info\": fields.Text(\"Other Information\"),\n", " 
\"comments\": fields.One2Many(\"message\", \"related_id\", \"Comments\"),\n", " \"activities\": fields.One2Many(\"activity\", \"related_id\", \"Activities\"),\n", " \"documents\": fields.One2Many(\"document\", \"related_id\", \"Documents\"),\n", " \"uuid\": fields.Char(\"UUID\"),\n", " \"price_list_id\": fields.Many2One(\"price.list\", \"Price List\"),\n", " \"emails\": fields.One2Many(\"email.message\", \"related_id\", \"Emails\"),\n", " \"company_id\": fields.Many2One(\"company\", \"Company\"),\n", " \"related_id\": fields.Reference([[\"issue\", \"Issue\"]], \"Related To\"),\n", " \"ship_term_id\": fields.Many2One(\"ship.term\", \"Shipping Terms\"),\n", " \"sequence_id\": fields.Many2One(\"sequence\", \"Number Sequence\"),\n", " \"job_template_id\": fields.Many2One(\"job.template\", \"Service Order Template\"),\n", " \"lost_sale_code_id\": fields.Many2One(\"reason.code\", \"Lost Sale Reason Code\", condition=[[\"type\", \"=\", \"lost_sale\"]]),\n", " \"agg_amount_total\": fields.Decimal(\"Total Amount\", agg_function=[\"sum\", \"amount_total\"]),\n", " \"agg_amount_subtotal\": fields.Decimal(\"Total Amount w/o Tax\", agg_function=[\"sum\", \"amount_subtotal\"]),\n", " \"year\": fields.Char(\"Year\", sql_function=[\"year\", \"date\"]),\n", " \"quarter\": fields.Char(\"Quarter\", sql_function=[\"quarter\", \"date\"]),\n", " \"month\": fields.Char(\"Month\", sql_function=[\"month\", \"date\"]),\n", " \"week\": fields.Char(\"Week\", sql_function=[\"week\", \"date\"]),\n", " \"est_costs\": fields.One2Many(\"quot.cost\",\"quot_id\",\"Costs\"),\n", " \"est_cost_amount\": fields.Float(\"Estimated Cost Amount\", function=\"get_est_profit\", function_multi=True),\n", " \"est_profit_amount\": fields.Float(\"Estimated Profit Amount\", function=\"get_est_profit\", function_multi=True),\n", " \"est_margin_percent\": fields.Float(\"Estimated Margin %\", function=\"get_est_profit\", function_multi=True),\n", " \"currency_rates\": fields.One2Many(\"custom.currency.rate\",\"related_id\",\"Currency Rates\"),\n", " }\n", "\n", " def _get_number(self, context={}):\n", " seq_id = get_model(\"sequence\").find_sequence(type=\"sale_quot\")\n", " if not seq_id:\n", " return None\n", " while 1:\n", " num = get_model(\"sequence\").get_next_number(seq_id, context=context)\n", " if not num:\n", " return None\n", " user_id = get_active_user()\n", " set_active_user(1)\n", " res = self.search([[\"number\", \"=\", num]])\n", " set_active_user(user_id)\n", " if not res:\n", " return num\n", " get_model(\"sequence\").increment_number(seq_id, context=context)\n", "\n", " def _get_currency(self, context={}):\n", " settings = get_model(\"settings\").browse(1)\n", " return settings.currency_id.id\n", "\n", " def _get_currency_rates(self,context={}):\n", " settings = get_model(\"settings\").browse(1)\n", " lines=[]\n", " date = time.strftime(\"%Y-%m-%d\")\n", " val = {\n", " \"currency_id\": settings.currency_id.id,\n", " \"rate\": settings.currency_id.get_rate(date,\"sell\") or 1\n", " }\n", " if context.get('action_name'):\n", " # default for new quotation create via quotation form\n", " lines.append(val)\n", " else:\n", " # When users create or copy quotation from other modules or methods, one2many field cannot be appended without action key\n", " # bacause it must be created in the database along with quotation itself.\n", " # If action key such as 'create', 'delete' is missing, the default line will not be created.\n", " # So, the action_key 'create' has to be appended into the list also.\n", " 
lines.append((\"create\",val))\n", " return lines\n", "\n", " _defaults = {\n", " \"state\": \"draft\",\n", " \"date\": lambda *a: time.strftime(\"%Y-%m-%d\"),\n", " \"number\": _get_number,\n", " \"currency_id\": _get_currency,\n", " \"tax_type\": \"tax_ex\",\n", " \"user_id\": lambda self, context: get_active_user(),\n", " \"uuid\": lambda *a: str(uuid.uuid4()),\n", " \"company_id\": lambda *a: get_active_company(),\n", " \"currency_rates\": _get_currency_rates,\n", " }\n", " _constraints = [\"check_fields\"]\n", " _order = \"date desc\"\n", "\n", " def check_fields(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " if obj.state in (\"waiting_approval\", \"approved\"):\n", " if not obj.lines:\n", " raise Exception(\"No lines in quotation\")\n", "\n", " def create(self, vals, **kw):\n", " id = super().create(vals, **kw)\n", " self.function_store([id])\n", " return id\n", "\n", " def write(self, ids, vals, **kw):\n", " opport_ids = []\n", " for obj in self.browse(ids):\n", " if obj.opport_id:\n", " opport_ids.append(obj.opport_id.id)\n", " super().write(ids, vals, **kw)\n", " if opport_ids:\n", " get_model(\"sale.opportunity\").function_store(opport_ids)\n", " self.function_store(ids)\n", "\n", " def function_store(self, ids, field_names=None, context={}):\n", " super().function_store(ids, field_names, context)\n", " opport_ids = []\n", " for obj in self.browse(ids):\n", " if obj.opport_id:\n", " opport_ids.append(obj.opport_id.id)\n", " if opport_ids:\n", " get_model(\"sale.opportunity\").function_store(opport_ids)\n", "\n", " def get_amount(self, ids, context={}):\n", " res = {}\n", " for obj in self.browse(ids):\n", " vals = {}\n", " subtotal = 0\n", " tax = 0\n", " for line in obj.lines:\n", " if line.is_hidden:\n", " continue\n", " if line.tax_id:\n", " line_tax = get_model(\"account.tax.rate\").compute_tax(\n", " line.tax_id.id, line.amount, tax_type=obj.tax_type)\n", " else:\n", " line_tax = 0\n", " tax += line_tax\n", " if obj.tax_type == \"tax_in\":\n", " subtotal += (line.amount or 0) - line_tax\n", " else:\n", " subtotal += line.amount or 0\n", " vals[\"amount_subtotal\"] = subtotal\n", " vals[\"amount_tax\"] = tax\n", " vals[\"amount_total\"] = subtotal + tax\n", " res[obj.id] = vals\n", " return res\n", "\n", " def get_qty_total(self, ids, context={}):\n", " res = {}\n", " for obj in self.browse(ids):\n", " qty = sum([line.qty for line in obj.lines])\n", " res[obj.id] = qty or 0\n", " return res\n", "\n", " def submit_for_approval(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " if obj.state != \"draft\":\n", " raise Exception(\"Invalid state\")\n", " obj.write({\"state\": \"waiting_approval\"})\n", " self.trigger(ids, \"submit_for_approval\")\n", "\n", " def approve(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " if obj.state not in (\"draft\", \"waiting_approval\"):\n", " raise Exception(\"Invalid state\")\n", " obj.write({\"state\": \"approved\"})\n", "\n", " def update_amounts(self, context):\n", " print(\"update_amounts\")\n", " data = context[\"data\"]\n", " data[\"amount_subtotal\"] = 0\n", " data[\"amount_tax\"] = 0\n", " tax_type = data[\"tax_type\"]\n", " #===============>>>\n", " def _get_relative_currency_rate(currency_id):\n", " rate=None\n", " for r in data['currency_rates']:\n", " if r.get('currency_id')==currency_id:\n", " rate=r.get('rate') or 0\n", " break\n", " if rate is None:\n", " print(data['date'],currency_id,data['currency_id'])\n", " rate_from=get_model(\"currency\").get_rate([currency_id],data['date']) 
or Decimal(1)\n", " rate_to=get_model(\"currency\").get_rate([data['currency_id']],data['date']) or Decimal(1)\n", " rate=rate_from/rate_to\n", " return rate\n", " item_costs={}\n", " for cost in data['est_costs']:\n", " if not cost:\n", " continue\n", " amt=cost['amount'] or 0\n", " if cost.get('currency_id'):\n", " print(cost.get(\"currency_id.id\"),cost.get(\"currency_id\"))\n", " rate=_get_relative_currency_rate(cost.get(\"currency_id\"))\n", " amt=amt*rate\n", " comps=[]\n", " if cost.get(\"sequence\"):\n", " for comp in cost['sequence'].split(\".\"):\n", " comps.append(comp)\n", " path=\".\".join(comps)\n", " k=(data['id'],path)\n", " item_costs.setdefault(k,0)\n", " item_costs[k]+=amt\n", " #<<<===============\n", " for line in data[\"lines\"]:\n", " if not line:\n", " continue\n", " amt = (line.get(\"qty\") or 0) * (line.get(\"unit_price\") or 0)\n", " amt = Decimal(roundup(amt))\n", " if line.get(\"discount\"):\n", " disc = Decimal(amt) * Decimal(line[\"discount\"]) / Decimal(100)\n", " amt -= disc\n", " if line.get(\"discount_amount\"):\n", " amt -= line[\"discount_amount\"]\n", " line[\"amount\"] = amt\n", " #===============>>>\n", " k=None\n", " if id in data:\n", " k=(data['id'],line.get(\"sequence\",0))\n", " else:\n", " k=(line.get(\"sequence\",0))\n", " cost=item_costs.get(k,0)\n", " profit=amt-cost\n", " margin=profit*100/amt if amt else 0\n", " line[\"est_cost_amount\"]=cost\n", " line[\"est_profit_amount\"]=profit\n", " line[\"est_margin_percent\"]=margin\n", " #<<<===============\n", " hide_parents=[]\n", " for line in data[\"lines\"]:\n", " if not line:\n", " continue\n", " if line.get(\"sequence\") and line.get(\"hide_sub\"):\n", " hide_parents.append(line[\"sequence\"])\n", " is_hidden={}\n", " hide_totals={}\n", " for line in data[\"lines\"]:\n", " if not line:\n", " continue\n", " if not line.get(\"sequence\"):\n", " continue\n", " parent_seq=None\n", " for seq in hide_parents:\n", " if line[\"sequence\"].startswith(seq+\".\"):\n", " parent_seq=seq\n", " break\n", " if parent_seq:\n", " is_hidden[line[\"sequence\"]]=True\n", " hide_totals.setdefault(parent_seq,0)\n", " hide_totals[parent_seq]+=line[\"amount\"]\n", " for line in data[\"lines\"]:\n", " if not line:\n", " continue\n", " if line.get(\"sequence\") and line.get(\"hide_sub\"):\n", " line[\"amount\"]=hide_totals.get(line[\"sequence\"],0)\n", " if line[\"qty\"]:\n", " line[\"unit_price\"]=line[\"amount\"]/line[\"qty\"]\n", " for line in data[\"lines\"]:\n", " if is_hidden.get(line.get(\"sequence\")):\n", " continue\n", " tax_id = line.get(\"tax_id\")\n", " if tax_id:\n", " tax = get_model(\"account.tax.rate\").compute_tax(tax_id, line[\"amount\"], tax_type=tax_type)\n", " data[\"amount_tax\"] += tax\n", " else:\n", " tax = 0\n", " if tax_type == \"tax_in\":\n", " data[\"amount_subtotal\"] += Decimal(line[\"amount\"] - tax)\n", " else:\n", " data[\"amount_subtotal\"] += Decimal(line[\"amount\"])\n", " data[\"amount_total\"] = data[\"amount_subtotal\"] + data[\"amount_tax\"]\n", " return data\n", "\n", " def onchange_product(self, context):\n", " data = context[\"data\"]\n", " contact_id = data.get(\"contact_id\")\n", " if contact_id:\n", " contact = get_model(\"contact\").browse(contact_id)\n", " else:\n", " contact = None\n", " path = context[\"path\"]\n", " line = get_data_path(data, path, parent=True)\n", " prod_id = line.get(\"product_id\")\n", " if not prod_id:\n", " return {}\n", " prod = get_model(\"product\").browse(prod_id)\n", " line[\"description\"] = prod.description\n", " 
line[\"est_margin_percent_input\"] = prod.gross_profit\n", " line[\"qty\"] = 1\n", " if prod.uom_id is not None:\n", " line[\"uom_id\"] = prod.uom_id.id\n", " pricelist_id = data[\"price_list_id\"]\n", " price = None\n", " if pricelist_id:\n", " price = get_model(\"price.list\").get_price(pricelist_id, prod.id, 1)\n", " price_list = get_model(\"price.list\").browse(pricelist_id)\n", " price_currency_id = price_list.currency_id.id\n", " if price is None:\n", " price = prod.sale_price\n", " settings = get_model(\"settings\").browse(1)\n", " price_currency_id = settings.currency_id.id\n", " if price is not None:\n", " currency_id = data[\"currency_id\"]\n", " price_cur = get_model(\"currency\").convert(price, price_currency_id, currency_id)\n", " line[\"unit_price\"] = price_cur\n", " if prod.sale_tax_id is not None:\n", " line[\"tax_id\"] = prod.sale_tax_id.id\n", " data = self.update_amounts(context)\n", " return data\n", "\n", " def onchange_qty(self, context):\n", " data = context[\"data\"]\n", " path = context[\"path\"]\n", " line = get_data_path(data, path, parent=True)\n", " prod_id = line.get(\"product_id\")\n", " if not prod_id:\n", " return {}\n", " prod = get_model(\"product\").browse(prod_id)\n", " pricelist_id = data[\"price_list_id\"]\n", " qty = line[\"qty\"]\n", " if line.get(\"unit_price\") is None:\n", " price = None\n", " if pricelist_id:\n", " price = get_model(\"price.list\").get_price(pricelist_id, prod.id, qty)\n", " price_list = get_model(\"price.list\").browse(pricelist_id)\n", " price_currency_id = price_list.currency_id.id\n", " if price is None:\n", " price = prod.sale_price\n", " settings = get_model(\"settings\").browse(1)\n", " price_currency_id = settings.currency_id.id\n", " if price is not None:\n", " currency_id = data[\"currency_id\"]\n", " price_cur = get_model(\"currency\").convert(price, price_currency_id, currency_id)\n", " line[\"unit_price\"] = price_cur\n", " data = self.update_amounts(context)\n", " return data\n", "\n", " def onchange_contact(self, context):\n", " data = context[\"data\"]\n", " contact_id = data.get(\"contact_id\")\n", " if not contact_id:\n", " return {}\n", " contact = get_model(\"contact\").browse(contact_id)\n", " data[\"payment_terms\"] = contact.payment_terms\n", " data[\"price_list_id\"] = contact.sale_price_list_id.id\n", " if contact.currency_id:\n", " data[\"currency_id\"] = contact.currency_id.id\n", " else:\n", " settings = get_model(\"settings\").browse(1)\n", " data[\"currency_id\"] = settings.currency_id.id\n", " return data\n", "\n", " def onchange_uom(self, context):\n", " data = context[\"data\"]\n", " path = context[\"path\"]\n", " line = get_data_path(data, path, parent=True)\n", " prod_id = line.get(\"product_id\")\n", " if not prod_id:\n", " return {}\n", " prod = get_model(\"product\").browse(prod_id)\n", " uom_id = line.get(\"uom_id\")\n", " if not uom_id:\n", " return {}\n", " uom = get_model(\"uom\").browse(uom_id)\n", " if prod.sale_price is not None:\n", " line[\"unit_price\"] = prod.sale_price * uom.ratio / prod.uom_id.ratio\n", " data = self.update_amounts(context)\n", " return data\n", "\n", " def copy(self, ids, context):\n", " obj = self.browse(ids)[0]\n", " vals = {\n", " \"ref\": obj.number,\n", " \"contact_id\": obj.contact_id.id,\n", " \"currency_id\": obj.currency_id.id,\n", " \"tax_type\": obj.tax_type,\n", " \"payment_terms\": obj.payment_terms,\n", " \"other_info\": obj.other_info,\n", " \"exp_date\": obj.exp_date,\n", " \"opport_id\": obj.opport_id.id,\n", " \"lines\": [],\n", " }\n", " for 
line in obj.lines:\n", " line_vals = {\n", " \"product_id\": line.product_id.id,\n", " \"description\": line.description,\n", " \"qty\": line.qty,\n", " \"uom_id\": line.uom_id.id,\n", " \"unit_price\": line.unit_price,\n", " \"discount\": line.discount,\n", " \"discount_amount\": line.discount_amount,\n", " \"tax_id\": line.tax_id.id,\n", " 'amount': line.amount,\n", " 'sequence': line.sequence,\n", " }\n", " vals[\"lines\"].append((\"create\", line_vals))\n", " new_id = self.create(vals, context=context)\n", " new_obj = self.browse(new_id)\n", " return {\n", " \"next\": {\n", " \"name\": \"quot\",\n", " \"mode\": \"form\",\n", " \"active_id\": new_id,\n", " },\n", " \"flash\": \"Quotation %s copied from %s\" % (new_obj.number, obj.number),\n", " }\n", "\n", " def revise(self, ids, context):\n", " obj = self.browse(ids)[0]\n", " res = self.copy(ids, context)\n", " obj.write({\"state\": \"revised\"})\n", " return res\n", "\n", " def copy_to_sale_order(self,ids,context):\n", " id=ids[0]\n", " obj=self.browse(id)\n", " sale_vals={\n", " \"ref\": obj.number,\n", " \"quot_id\": obj.id,\n", " \"contact_id\": obj.contact_id.id,\n", " \"currency_id\": obj.currency_id.id,\n", " \"tax_type\": obj.tax_type,\n", " \"lines\": [],\n", " \"user_id\": obj.user_id.id,\n", " \"other_info\": obj.other_info,\n", " \"payment_terms\": obj.payment_terms,\n", " \"price_list_id\": obj.price_list_id.id,\n", " \"job_template_id\": obj.job_template_id.id,\n", " \"est_costs\": [],\n", " \"currency_rates\": [],\n", " \"related_id\": \"sale.quot,%s\"%obj.id,\n", " }\n", " for line in obj.lines:\n", " if not line.qty:\n", " continue\n", " prod=line.product_id\n", " line_vals={\n", " \"sequence\": line.sequence,\n", " \"product_id\": prod.id,\n", " \"description\": line.description,\n", " \"qty\": line.qty,\n", " \"uom_id\": line.uom_id and line.uom_id.id or None,\n", " \"unit_price\": line.unit_price if not line.is_hidden else 0,\n", " \"discount\": line.discount if not line.is_hidden else 0,\n", " \"discount_amount\": line.discount_amount if not line.is_hidden else 0,\n", " \"tax_id\": line.tax_id.id if not line.is_hidden else None,\n", " \"location_id\": prod.location_id.id if prod else None,\n", " }\n", " if prod.locations:\n", " line_vals[\"location_id\"] = prod.locations[0].location_id.id\n", " for loc in prod.locations:\n", " if loc.stock_qty:\n", " line_vals['location_id']=loc.location_id.id\n", " break\n", " sale_vals[\"lines\"].append((\"create\",line_vals))\n", " for cost in obj.est_costs:\n", " cost_vals={\n", " \"sequence\": cost.sequence,\n", " \"product_id\": cost.product_id.id,\n", " \"description\": cost.description,\n", " \"supplier_id\": cost.supplier_id.id,\n", " \"list_price\": cost.list_price,\n", " \"purchase_price\": cost.purchase_price,\n", " \"purchase_duty_percent\": cost.purchase_duty_percent,\n", " \"purchase_ship_percent\": cost.purchase_ship_percent,\n", " \"landed_cost\": cost.landed_cost,\n", " \"qty\": cost.qty,\n", " \"currency_id\": cost.currency_id.id,\n", " }\n", " sale_vals[\"est_costs\"].append((\"create\",cost_vals))\n", " for r in obj.currency_rates:\n", " rate_vals={\n", " \"currency_id\": r.currency_id.id,\n", " \"rate\": r.rate,\n", " }\n", " sale_vals[\"currency_rates\"].append((\"create\",rate_vals))\n", " sale_id=get_model(\"sale.order\").create(sale_vals,context=context)\n", " sale=get_model(\"sale.order\").browse(sale_id)\n", " return {\n", " \"next\": {\n", " \"name\": \"sale\",\n", " \"mode\": \"form\",\n", " \"active_id\": sale_id,\n", " },\n", " \"flash\": \"Sale 
order %s created from quotation %s\"%(sale.number,obj.number)\n", " }\n", "\n", " def do_won(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " assert obj.state == \"approved\"\n", " obj.write({\"state\": \"won\"})\n", "\n", " def do_lost(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " assert obj.state == \"approved\"\n", " obj.write({\"state\": \"lost\"})\n", "\n", " def do_reopen(self, ids, context={}):\n", " for obj in self.browse(ids):\n", " assert obj.state in (\"won\", \"lost\")\n", " obj.write({\"state\": \"approved\"})\n", "\n", " def get_state(self, ids, context={}):\n", " vals = {}\n", " for obj in self.browse(ids):\n", " state = obj.state\n", " if state == \"approved\":\n", " found = False\n", " for sale in obj.sales:\n", " if sale.state in (\"confirmed\", \"done\"):\n", " found = True\n", " break\n", " if found:\n", " state = \"won\"\n", " vals[obj.id] = state\n", " return vals\n", "\n", " def view_link(self, ids, context={}):\n", " obj = self.browse(ids)[0]\n", " uuid = obj.uuid\n", " dbname = get_active_db()\n", " return {\n", " \"next\": {\n", " \"type\": \"url\",\n", " \"url\": \"/view_quot?dbname=%s&uuid=%s\" % (dbname, uuid),\n", " }\n", " }\n", "\n", " def get_template_quot_form(self, ids, context={}):\n", " obj = self.browse(ids)[0]\n", " has_discount=False\n", " for line in obj.lines:\n", " if line.discount:\n", " has_discount=True\n", " if has_discount:\n", " return \"quot_form_disc\"\n", " else:\n", " return \"quot_form\"\n", "\n", " def to_draft(self, ids, context={}):\n", " obj = self.browse(ids)[0]\n", " obj.write({\"state\": \"draft\"})\n", "\n", " def get_amount_total_words(self, ids, context={}):\n", " vals = {}\n", " for obj in self.browse(ids):\n", " amount_total_words = utils.num2word(obj.amount_total)\n", " vals[obj.id] = amount_total_words\n", " return vals\n", "\n", " def onchange_sequence(self, context={}):\n", " data = context[\"data\"]\n", " seq_id = data[\"sequence_id\"]\n", " context['date']=data['date']\n", " if not seq_id:\n", " return None\n", " while 1:\n", " num = get_model(\"sequence\").get_next_number(seq_id, context=context)\n", " res = self.search([[\"number\", \"=\", num]])\n", " if not res:\n", " break\n", " get_model(\"sequence\").increment_number(seq_id, context=context)\n", " data[\"number\"] = num\n", " return data\n", "\n", " def onchange_cost_product(self,context):\n", " data=context[\"data\"]\n", " path=context[\"path\"]\n", " line=get_data_path(data,path,parent=True)\n", " prod_id=line.get(\"product_id\")\n", " if prod_id:\n", " prod=get_model(\"product\").browse(prod_id)\n", " line[\"description\"]=prod.name\n", " line[\"list_price\"]=prod.purchase_price\n", " line[\"purchase_price\"]=prod.purchase_price\n", " line[\"landed_cost\"]=prod.landed_cost\n", " line[\"qty\"]=1\n", " line[\"uom_id\"]=prod.uom_id.id\n", " line[\"currency_id\"]=prod.purchase_currency_id.id\n", " line[\"purchase_duty_percent\"]=prod.purchase_duty_percent\n", " line[\"purchase_ship_percent\"]=prod.purchase_ship_percent\n", " line[\"landed_cost\"]=prod.landed_cost\n", " line[\"amount\"]=line['qty']*line['landed_cost'] or 0\n", "\n", " if prod.suppliers:\n", " line[\"supplier_id\"]=prod.suppliers[0].supplier_id.id\n", " return data\n", "\n", " def get_est_profit(self, ids, context={}):\n", " vals = {}\n", " for obj in self.browse(ids):\n", " cost=0\n", " for line in obj.lines:\n", " cost+=line.est_cost_amount or 0\n", " profit = (obj.amount_subtotal or 0) - cost\n", " margin=profit*100/obj.amount_subtotal if obj.amount_subtotal 
else None\n", " vals[obj.id] = {\n", " \"est_cost_amount\": cost,\n", " \"est_profit_amount\": profit,\n", " \"est_margin_percent\": margin,\n", " }\n", " return vals\n", "\n", " def create_est_costs(self,ids,context={}):\n", " obj=self.browse(ids[0])\n", " del_ids=[]\n", " for cost in obj.est_costs:\n", " if cost.product_id:\n", " del_ids.append(cost.id)\n", " get_model(\"quot.cost\").delete(del_ids)\n", " #obj.write({\"est_costs\":[(\"delete_all\",)]})\n", " line_sequence = 1\n", " settings = get_model(\"settings\").browse(1)\n", " for line in obj.lines:\n", " prod=line.product_id\n", " cur_line_sequence = line_sequence\n", " landed_cost = prod.landed_cost\n", " if not prod:\n", " continue\n", " if not prod.purchase_price and prod.type != \"service\":\n", " continue\n", " if not prod.cost_price and prod.type == \"service\":\n", " continue\n", " #if not line.sequence:\n", " #continue\n", " if \"bundle\" == prod.type:\n", " continue\n", " # update line seqence\n", " if not line.sequence:\n", " line.write({\"sequence\": cur_line_sequence})\n", " line_sequence += 1\n", " else:\n", " line_sequence = round(Decimal(line.sequence)) + Decimal(1)\n", " # comput cost if product is service\n", " if prod.type == \"service\":\n", " if round(prod.cost_price,2) == round(line.unit_price,2):\n", " landed_cost = prod.cost_price\n", " else:\n", " landed_cost = prod.cost_price * line.unit_price\n", " vals={\n", " \"quot_id\": obj.id,\n", " \"sequence\": (line.sequence if not line.is_hidden else line.parent_sequence) if line.sequence else cur_line_sequence,\n", " \"product_id\": prod.id,\n", " \"description\": prod.name,\n", " \"supplier_id\": prod.suppliers[0].supplier_id.id if prod.suppliers else None,\n", " \"list_price\": prod.purchase_price,\n", " \"purchase_price\": prod.purchase_price,\n", " #\"landed_cost\": prod.cost_price if prod.type == \"service\" else prod.landed_cost,\n", " \"landed_cost\": landed_cost,\n", " \"purchase_duty_percent\": prod.purchase_duty_percent,\n", " \"purchase_ship_percent\": prod.purchase_ship_percent,\n", " \"qty\": line.qty,\n", " \"currency_id\": prod.purchase_currency_id.id or settings.currency_id.id,\n", " }\n", " get_model(\"quot.cost\").create(vals)\n", "\n", " def merge_quotations(self,ids,context={}):\n", " if len(ids)<2:\n", " raise Exception(\"Can not merge less than two quotations\")\n", " contact_ids=[]\n", " currency_ids=[]\n", " tax_types=[]\n", " for obj in self.browse(ids):\n", " contact_ids.append(obj.contact_id.id)\n", " currency_ids.append(obj.currency_id.id)\n", " tax_types.append(obj.tax_type)\n", " contact_ids=list(set(contact_ids))\n", " currency_ids=list(set(currency_ids))\n", " tax_types=list(set(tax_types))\n", " if len(contact_ids)>1:\n", " raise Exception(\"Quotation customers have to be the same\")\n", " if len(currency_ids)>1:\n", " raise Exception(\"Quotation currencies have to be the same\")\n", " if len(tax_types)>1:\n", " raise Exception(\"Quotation tax types have to be the same\")\n", " vals = {\n", " \"contact_id\": contact_ids[0],\n", " \"currency_id\": currency_ids[0],\n", " \"tax_type\": tax_types[0],\n", " \"lines\": [],\n", " \"est_costs\": [],\n", " }\n", " seq=0\n", " refs=[]\n", " for obj in sorted(self.browse(ids),key=lambda obj: obj.number):\n", " refs.append(obj.number)\n", " seq_map={}\n", " for line in obj.lines:\n", " seq+=1\n", " seq_map[line.sequence]=seq\n", " qty=line.qty or 0\n", " unit_price=line.unit_price or 0\n", " amt=qty*unit_price\n", " disc=amt*(line.discount or 0)/Decimal(100)\n", " line_vals = {\n", " 
\"sequence\": seq,\n", " \"product_id\": line.product_id.id,\n", " \"description\": line.description,\n", " \"qty\": qty,\n", " \"uom_id\": line.uom_id.id,\n", " \"unit_price\": unit_price,\n", " \"discount\": disc,\n", " \"amount\": amt,\n", " \"tax_id\": line.tax_id.id,\n", " }\n", " vals[\"lines\"].append((\"create\", line_vals))\n", " for cost in obj.est_costs:\n", " cost_vals={\n", " \"sequence\": seq_map.get(cost.sequence),\n", " \"product_id\": cost.product_id.id,\n", " \"description\": cost.description,\n", " \"supplier_id\": cost.supplier_id.id,\n", " \"list_price\": cost.list_price,\n", " \"purchase_price\": cost.purchase_price,\n", " \"landed_cost\": cost.landed_cost,\n", " \"qty\": cost.qty,\n", " \"currency_id\": cost.currency_id.id,\n", " }\n", " vals[\"est_costs\"].append((\"create\",cost_vals))\n", " vals['ref']=', '.join([ref for ref in refs])\n", " new_id = self.create(vals, context=context)\n", " new_obj = self.browse(new_id)\n", " return {\n", " \"next\": {\n", " \"name\": \"quot\",\n", " \"mode\": \"form\",\n", " \"active_id\": new_id,\n", " },\n", " \"flash\": \"Quotations merged\",\n", " }\n", "\n", " def onchange_est_margin(self,context={}):\n", " data=context[\"data\"]\n", " path=context[\"path\"]\n", " line=get_data_path(data,path,parent=True)\n", " margin=line[\"est_margin_percent_input\"]\n", " amt=line[\"est_cost_amount\"]/(1-margin/Decimal(100))\n", " price=round(amt/line[\"qty\"])\n", " line[\"unit_price\"]=price\n", " self.update_amounts(context)\n", " return data\n", "\n", " def get_relative_currency_rate(self,ids,currency_id):\n", " obj=self.browse(ids[0])\n", " rate=None\n", " for r in obj.currency_rates:\n", " if r.currency_id.id==currency_id:\n", " rate=r.rate\n", " break\n", " if rate is None:\n", " rate_from=get_model(\"currency\").get_rate([currency_id],obj.date) or Decimal(1)\n", " rate_to=obj.currency_id.get_rate(obj.date) or Decimal(1)\n", " rate=rate_from/rate_to\n", " return rate\n", "\n", " def update_cost_amount(self,context={}):\n", " data=context['data']\n", " path=context['path']\n", " line=get_data_path(data,path,parent=True)\n", " line['amount']=(line['qty'] or 0) *(line['landed_cost'] or 0)\n", " return data\n", "\n", "SaleQuot.register()\n" ]
[ 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0.3333333333333333, 0.012345679012345678, 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.0043859649122807015, 0, 0.009009009009009009, 0.009259259259259259, 0.009523809523809525, 0.010752688172043012, 0, 0, 0.011627906976744186, 0, 0.006802721088435374, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0.007936507936507936, 0.01020408163265306, 0.008928571428571428, 0, 0, 0, 0, 0.028985507246376812, 0.008771929824561403, 0.00847457627118644, 0.008771929824561403, 0.030927835051546393, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0.058823529411764705, 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0.007462686567164179, 0.011627906976744186, 0.009523809523809525, 0.012345679012345678, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0.018518518518518517, 0.045454545454545456, 0, 0.018518518518518517, 0.022727272727272728, 0, 0, 0.029411764705882353, 0.030303030303030304, 0.02857142857142857, 0.02564102564102564, 0, 0.045454545454545456, 0, 0, 0, 0.027777777777777776, 0, 0.013513513513513514, 0.013513513513513514, 0.034482758620689655, 0.047619047619047616, 0, 0, 0, 0.024390243902439025, 0.05, 0.02127659574468085, 0.02564102564102564, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.05263157894736842, 0, 0.05555555555555555, 0, 0.046511627906976744, 0.05405405405405406, 0.03571428571428571, 0.020833333333333332, 0.024390243902439025, 0.022222222222222223, 0.021739130434782608, 0.03125, 0.041666666666666664, 0, 0, 0, 0, 0, 0.047619047619047616, 0.043478260869565216, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0.02857142857142857, 0, 0, 0.02040816326530612, 0.018867924528301886, 0.017857142857142856, 0, 0, 0, 0, 0.029850746268656716, 0, 0.015151515151515152, 0, 0, 0, 0, 0, 0.009345794392523364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0.05555555555555555, 0.03571428571428571, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0, 0, 0, 0, 0.030303030303030304, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0.016666666666666666, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015625, 0, 0.041666666666666664, 0, 0, 0, 0.014492753623188406, 0.02702702702702703, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0.03409090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0.02702702702702703, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0.034482758620689655, 0.034482758620689655, 0.06, 0.02564102564102564, 0, 0.018518518518518517, 0.023809523809523808, 0.0196078431372549, 0.01818181818181818, 0.02040816326530612, 0.038461538461538464, 0.023809523809523808, 0.01639344262295082, 0.014492753623188406, 0.014492753623188406, 0.02040816326530612, 0.015625, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0.020833333333333332, 0, 0.024096385542168676, 0, 0, 0, 0, 0, 0, 0, 0.0425531914893617, 0.03125, 0.05263157894736842, 0, 0, 0, 0, 0.019230769230769232, 0, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0, 0, 0, 0, 0.02857142857142857, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0273972602739726, 0, 0, 0, 0.05263157894736842, 0, 0.007518796992481203, 0, 0, 0.010752688172043012, 0, 0, 0.020618556701030927, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0.0425531914893617, 0.043478260869565216, 0, 0.043478260869565216, 0.041666666666666664, 0.047619047619047616, 0, 0, 0, 0, 0.023255813953488372, 0.022222222222222223, 0.02564102564102564, 0.03225806451612903, 0, 0.03125, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0.0625, 0.013888888888888888, 0, 0.043478260869565216, 0, 0.043478260869565216, 0.023255813953488372, 0.029411764705882353, 0.020833333333333332, 0.02857142857142857, 0.01694915254237288, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0.034482758620689655, 0.034482758620689655, 0.06, 0.020833333333333332, 0.016666666666666666, 0.02702702702702703, 0.030303030303030304, 0, 0, 0, 0.034482758620689655, 0.03125, 0.05555555555555555, 0, 0.021739130434782608, 0.03571428571428571, 0, 0, 0.03296703296703297, 0.014492753623188406, 0.02857142857142857, 0, 0, 0.022222222222222223, 0.034482758620689655, 0.034482758620689655, 0.06, 0.02857142857142857, 0, 0, 0.05 ]
805
0.007282
# Generated by Django 2.0.8 on 2019-01-22 08:38

import uuid

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('taxonomy', '0015_auto_20190122_1637'),
    ]

    operations = [
        migrations.AlterField(
            model_name='community',
            name='source_id',
            field=models.CharField(default=uuid.UUID('0a13366e-1e21-11e9-a86f-ecf4bb19b5fc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='class_name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Class'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='division_name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Division'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='family_name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Family Name'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='kingdom_name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Kingdom'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='order_name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Order Name'),
        ),
        migrations.AlterField(
            model_name='hbvfamily',
            name='supra_code',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),
        ),
        migrations.AlterField(
            model_name='hbvgroup',
            name='class_id',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),
        ),
        migrations.AlterField(
            model_name='hbvparent',
            name='class_id',
            field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='WACensus ClassID'),
        ),
        migrations.AlterField(
            model_name='hbvspecies',
            name='consv_code',
            field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Conservation Code'),
        ),
        migrations.AlterField(
            model_name='hbvspecies',
            name='naturalised',
            field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Naturalised'),
        ),
        migrations.AlterField(
            model_name='hbvspecies',
            name='ranking',
            field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Ranking'),
        ),
        migrations.AlterField(
            model_name='hbvvernacular',
            name='name',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='hbvvernacular',
            name='vernacular',
            field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Vernacular Name'),
        ),
    ]
[ "# Generated by Django 2.0.8 on 2019-01-22 08:38\n", "\n", "import uuid\n", "\n", "from django.db import migrations, models\n", "\n", "\n", "class Migration(migrations.Migration):\n", "\n", " dependencies = [\n", " ('taxonomy', '0015_auto_20190122_1637'),\n", " ]\n", "\n", " operations = [\n", " migrations.AlterField(\n", " model_name='community',\n", " name='source_id',\n", " field=models.CharField(default=uuid.UUID('0a13366e-1e21-11e9-a86f-ecf4bb19b5fc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='class_name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Class'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='division_name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Division'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='family_name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Family Name'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='kingdom_name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Kingdom'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='order_name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Order Name'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvfamily',\n", " name='supra_code',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvgroup',\n", " name='class_id',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvparent',\n", " name='class_id',\n", " field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='WACensus ClassID'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvspecies',\n", " name='consv_code',\n", " field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Conservation Code'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvspecies',\n", " name='naturalised',\n", " field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Naturalised'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvspecies',\n", " name='ranking',\n", " field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Ranking'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvvernacular',\n", " name='name',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Name'),\n", " ),\n", " migrations.AlterField(\n", " model_name='hbvvernacular',\n", " name='vernacular',\n", " field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Vernacular Name'),\n", " ),\n", " ]\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.004761904761904762, 0, 0, 0, 0, 0.008928571428571428, 0, 0, 0, 0, 0.008695652173913044, 0, 0, 0, 0, 0.00847457627118644, 0, 0, 0, 0, 0.008771929824561403, 0, 0, 0, 0, 0.008547008547008548, 0, 0, 0, 0, 0.007518796992481203, 0, 0, 0, 0, 0.007518796992481203, 0, 0, 0, 0, 0.00819672131147541, 0, 0, 0, 0, 0.008130081300813009, 0, 0, 0, 0, 0.008547008547008548, 0, 0, 0, 0, 0.008849557522123894, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0.00819672131147541, 0, 0 ]
85
0.001343
import argparse
import json
import sys

import semver
import jinja2

from pydeck._version import __version__

RELEASE_TYPES = ["MAJOR", "MINOR", "PATCH", "BETA", "ALPHA", "RC"]


def bump(release_type):
    version_info = semver.parse_version_info(__version__)
    if release_type == "MAJOR":
        return version_info.bump_major()
    elif release_type == "MINOR":
        return version_info.bump_minor()
    elif release_type == "PATCH":
        return version_info.bump_patch()
    elif release_type == "ALPHA":
        return version_info.bump_prerelease(token="a")
    elif release_type == "BETA":
        return version_info.bump_prerelease(token="b")
    elif release_type == "RC":
        return version_info.bump_prerelease(token="rc")
    else:
        raise Exception("Release type must be one of the following:", ", ".join(RELEASE_TYPES))


def rewrite_version_file(semver):
    with open("pydeck/_version.py", "w+") as f:
        t = jinja2.Template("__version__ = '{{semver_str}}'")
        contents = t.render(semver_str=str(semver))
        f.write(contents)


def rewrite_frontend_version_file():
    """Current associated version of NPM modules deck.gl and @deck.gl/jupyter-widget"""
    lerna_version = json.loads(open("../../lerna.json").read())["version"]
    with open("pydeck/frontend_semver.py", "w+") as f:
        t = jinja2.Template("DECKGL_SEMVER = '{{semver_str}}'")
        contents = t.render(semver_str=str(lerna_version))
        f.write(contents)
    return lerna_version


parser = argparse.ArgumentParser(description="Bump semver for pydeck. Modifies pydeck/_version.py directly.")
parser.add_argument("release_type", action="store", choices=RELEASE_TYPES, help="Release type to bump")
parser.add_argument("-y", "--yes", action="store_true", dest="yes", help="Automatically answer yes")


if __name__ == "__main__":
    args = parser.parse_args()
    should_accept_bump = args.yes
    bumped_version = bump(args.release_type)
    inform_bump = "Raising pydeck {} to {}".format(__version__, str(bumped_version))
    print(inform_bump)
    if not should_accept_bump:
        prompt = "Proceed? (Y/n) "
        response = input(prompt)
        if response != "Y":
            sys.exit(0)
    rewrite_version_file(bumped_version)
    deckgl_version = rewrite_frontend_version_file()
    print("Locked to deck.gl@{}".format(deckgl_version))
    print(bumped_version)
[ "import argparse\n", "import json\n", "import sys\n", "\n", "import semver\n", "import jinja2\n", "\n", "from pydeck._version import __version__\n", "\n", "RELEASE_TYPES = [\"MAJOR\", \"MINOR\", \"PATCH\", \"BETA\", \"ALPHA\", \"RC\"]\n", "\n", "\n", "def bump(release_type):\n", " version_info = semver.parse_version_info(__version__)\n", " if release_type == \"MAJOR\":\n", " return version_info.bump_major()\n", " elif release_type == \"MINOR\":\n", " return version_info.bump_minor()\n", " elif release_type == \"PATCH\":\n", " return version_info.bump_patch()\n", " elif release_type == \"ALPHA\":\n", " return version_info.bump_prerelease(token=\"a\")\n", " elif release_type == \"BETA\":\n", " return version_info.bump_prerelease(token=\"b\")\n", " elif release_type == \"RC\":\n", " return version_info.bump_prerelease(token=\"rc\")\n", " else:\n", " raise Exception(\"Release type must be one of the following:\", \", \".join(RELEASE_TYPES))\n", "\n", "\n", "def rewrite_version_file(semver):\n", " with open(\"pydeck/_version.py\", \"w+\") as f:\n", " t = jinja2.Template(\"__version__ = '{{semver_str}}'\")\n", " contents = t.render(semver_str=str(semver))\n", " f.write(contents)\n", "\n", "\n", "def rewrite_frontend_version_file():\n", " \"\"\"Current associated version of NPM modules deck.gl and @deck.gl/jupyter-widget\"\"\"\n", " lerna_version = json.loads(open(\"../../lerna.json\").read())[\"version\"]\n", " with open(\"pydeck/frontend_semver.py\", \"w+\") as f:\n", " t = jinja2.Template(\"DECKGL_SEMVER = '{{semver_str}}'\")\n", " contents = t.render(semver_str=str(lerna_version))\n", " f.write(contents)\n", " return lerna_version\n", "\n", "\n", "parser = argparse.ArgumentParser(description=\"Bump semver for pydeck. Modifies pydeck/_version.py directly.\")\n", "parser.add_argument(\"release_type\", action=\"store\", choices=RELEASE_TYPES, help=\"Release type to bump\")\n", "parser.add_argument(\"-y\", \"--yes\", action=\"store_true\", dest=\"yes\", help=\"Automatically answer yes\")\n", "\n", "\n", "if __name__ == \"__main__\":\n", " args = parser.parse_args()\n", " should_accept_bump = args.yes\n", " bumped_version = bump(args.release_type)\n", " inform_bump = \"Raising pydeck {} to {}\".format(__version__, str(bumped_version))\n", " print(inform_bump)\n", " if not should_accept_bump:\n", " prompt = \"Proceed? (Y/n) \"\n", " response = input(prompt)\n", " if response != \"Y\":\n", " sys.exit(0)\n", " rewrite_version_file(bumped_version)\n", " deckgl_version = rewrite_frontend_version_file()\n", " print(\"Locked to deck.gl@{}\".format(deckgl_version))\n", " print(bumped_version)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0.009615384615384616, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
67
0.000928
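The same five-field pattern repeats for every record in this dump: the raw source text, that text split into lines, one score per line, the line count, and a trailing per-record average. As a quick consistency check, the record just above carries 67 per-line scores whose mean works out to roughly the stated 0.000928. The following is a minimal sketch for validating a record along those lines; the function and parameter names are hypothetical, chosen only for illustration, and the tolerance is an assumption.

from statistics import mean


def check_record(line_texts, line_scores, line_count, avg_score):
    # One score per line of text, and the count field matches both (assumed layout).
    assert len(line_texts) == len(line_scores) == line_count
    # The trailing float appears to be the arithmetic mean of the per-line scores.
    assert abs(mean(line_scores) - avg_score) < 1e-4


# e.g. for the record above: check_record(lines, scores, 67, 0.000928)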
import datetime
import json
import logging
import pytz

from google.appengine.ext import ndb

from helpers.match_helper import MatchHelper
from models.event import Event
from models.match import Match

QF_SF_MAP = {
    1: (1, 3),  # in sf1, qf seeds 2 and 4 play. 0-indexed becomes 1, 3
    2: (0, 2),
    3: (1, 2),
    4: (0, 3),
    5: (2, 3),
    6: (0, 1)
}

LAST_LEVEL = {
    'sf': 'qf',
    'f': 'sf'
}

TIME_PATTERN = "%Y-%m-%dT%H:%M:%S"

ELIM_MAPPING = {
    1: (1, 1),  # (set, match)
    2: (2, 1),
    3: (3, 1),
    4: (4, 1),
    5: (1, 2),
    6: (2, 2),
    7: (3, 2),
    8: (4, 2),
    9: (1, 3),
    10: (2, 3),
    11: (3, 3),
    12: (4, 3),
    13: (1, 1),
    14: (2, 1),
    15: (1, 2),
    16: (2, 2),
    17: (1, 3),
    18: (2, 3),
    19: (1, 1),
    20: (1, 2),
    21: (1, 3),
    22: (1, 4),
    23: (1, 5),
    24: (1, 6),
}

OCTO_ELIM_MAPPING = {
    # octofinals
    1: (1, 1),  # (set, match)
    2: (2, 1),
    3: (3, 1),
    4: (4, 1),
    5: (5, 1),
    6: (6, 1),
    7: (7, 1),
    8: (8, 1),
    9: (1, 2),
    10: (2, 2),
    11: (3, 2),
    12: (4, 2),
    13: (5, 2),
    14: (6, 2),
    15: (7, 2),
    16: (8, 2),
    17: (1, 3),
    18: (2, 3),
    19: (3, 3),
    20: (4, 3),
    21: (5, 3),
    22: (6, 3),
    23: (7, 3),
    24: (8, 3),

    # quarterfinals
    25: (1, 1),
    26: (2, 1),
    27: (3, 1),
    28: (4, 1),
    29: (1, 2),
    30: (2, 2),
    31: (3, 2),
    32: (4, 2),
    33: (1, 3),
    34: (2, 3),
    35: (3, 3),
    36: (4, 3),

    # semifinals
    37: (1, 1),
    38: (2, 1),
    39: (1, 2),
    40: (2, 2),
    41: (1, 3),
    42: (2, 3),

    # finals
    43: (1, 1),
    44: (1, 2),
    45: (1, 3),
    46: (1, 4),
    47: (1, 5),
    48: (1, 6),
}


def get_comp_level(year, match_level, match_number, is_octofinals):
    if match_level == 'Qualification':
        return 'qm'
    else:
        if year == 2015:
            if match_number <= 8:
                return 'qf'
            elif match_number <= 14:
                return 'sf'
            else:
                return 'f'
        else:
            if is_octofinals:
                return get_comp_level_octo(year, match_number)
            if match_number <= 12:
                return 'qf'
            elif match_number <= 18:
                return 'sf'
            else:
                return 'f'


def get_comp_level_octo(year, match_number):
    """ No 2015 support """
    if match_number <= 24:
        return 'ef'
    elif match_number <= 36:
        return 'qf'
    elif match_number <= 42:
        return 'sf'
    else:
        return 'f'


def get_set_match_number(year, comp_level, match_number, is_octofinals):
    if year == 2015:
        if comp_level == 'sf':
            return 1, match_number - 8
        elif comp_level == 'f':
            return 1, match_number - 14
        else:  # qm, qf
            return 1, match_number
    else:
        if comp_level in {'ef', 'qf', 'sf', 'f'}:
            return OCTO_ELIM_MAPPING[match_number] if is_octofinals else ELIM_MAPPING[match_number]
        else:  # qm
            return 1, match_number


class FMSAPIHybridScheduleParser(object):

    def __init__(self, year, event_short):
        self.year = year
        self.event_short = event_short

    @classmethod
    def is_blank_match(cls, match):
        """
        Detect junk playoff matches like in 2017scmb
        """
        if match.comp_level == 'qm' or not match.score_breakdown:
            return False
        for color in ['red', 'blue']:
            if match.alliances[color]['score'] != 0:
                return False
            for value in match.score_breakdown[color].values():
                if value and value not in {'Unknown', 'None'}:  # Nonzero, False, blank, None, etc.
                    return False
        return True

    def parse(self, response):
        matches = response['Schedule']

        event_key = '{}{}'.format(self.year, self.event_short)
        event = Event.get_by_id(event_key)
        if event.timezone_id:
            event_tz = pytz.timezone(event.timezone_id)
        else:
            logging.warning("Event {} has no timezone! Match times may be wrong.".format(event_key))
            event_tz = None

        parsed_matches = []
        remapped_matches = {}  # If a key changes due to a tiebreaker
        is_octofinals = len(matches) > 0 and 'Octofinal' in matches[0]['description']
        for match in matches:
            if 'tournamentLevel' in match:  # 2016+
                level = match['tournamentLevel']
            else:  # 2015
                level = match['level']
            comp_level = get_comp_level(self.year, level, match['matchNumber'], is_octofinals)
            set_number, match_number = get_set_match_number(self.year, comp_level, match['matchNumber'], is_octofinals)

            red_teams = []
            blue_teams = []
            red_surrogates = []
            blue_surrogates = []
            team_key_names = []
            null_team = False
            sorted_teams = sorted(match['Teams'], key=lambda team: team['station'])  # Sort by station to ensure correct ordering. Kind of hacky.
            for team in sorted_teams:
                if team['teamNumber'] is None:
                    null_team = True
                team_key = 'frc{}'.format(team['teamNumber'])
                team_key_names.append(team_key)
                if 'Red' in team['station']:
                    red_teams.append(team_key)
                    if team['surrogate']:
                        red_surrogates.append(team_key)
                elif 'Blue' in team['station']:
                    blue_teams.append(team_key)
                    if team['surrogate']:
                        blue_surrogates.append(team_key)

            if null_team and match['scoreRedFinal'] is None and match['scoreBlueFinal'] is None:
                continue

            alliances = {
                'red': {
                    'teams': red_teams,
                    'surrogates': red_surrogates,
                    'score': match['scoreRedFinal']
                },
                'blue': {
                    'teams': blue_teams,
                    'surrogates': blue_surrogates,
                    'score': match['scoreBlueFinal']
                },
            }

            if not match['startTime']:  # no startTime means it's an unneeded rubber match
                continue

            time = datetime.datetime.strptime(match['startTime'].split('.')[0], TIME_PATTERN)
            if event_tz is not None:
                time = time - event_tz.utcoffset(time)

            actual_time_raw = match['actualStartTime'] if 'actualStartTime' in match else None
            actual_time = None
            if actual_time_raw is not None:
                actual_time = datetime.datetime.strptime(actual_time_raw.split('.')[0], TIME_PATTERN)
                if event_tz is not None:
                    actual_time = actual_time - event_tz.utcoffset(actual_time)

            post_result_time_raw = match.get('postResultTime')
            post_result_time = None
            if post_result_time_raw is not None:
                post_result_time = datetime.datetime.strptime(post_result_time_raw.split('.')[0], TIME_PATTERN)
                if event_tz is not None:
                    post_result_time = post_result_time - event_tz.utcoffset(post_result_time)

            key_name = Match.renderKeyName(
                event_key,
                comp_level,
                set_number,
                match_number)

            # Check for tiebreaker matches
            existing_match = Match.get_by_id(key_name)
            # Follow chain of existing matches
            while existing_match is not None and existing_match.tiebreak_match_key is not None:
                logging.info("Following Match {} to {}".format(existing_match.key.id(), existing_match.tiebreak_match_key.id()))
                existing_match = existing_match.tiebreak_match_key.get()
            # Check if last existing match needs to be tiebroken
            if existing_match and existing_match.comp_level != 'qm' and \
                    existing_match.has_been_played and \
                    existing_match.winning_alliance == '' and \
                    existing_match.actual_time != actual_time and \
                    not self.is_blank_match(existing_match):
                logging.warning("Match {} is tied!".format(existing_match.key.id()))

                # TODO: Only query within set if set_number ever gets indexed
                match_count = 0
                for match_key in Match.query(Match.event==event.key, Match.comp_level==comp_level).fetch(keys_only=True):
                    _, match_key = match_key.id().split('_')
                    if match_key.startswith('{}{}'.format(comp_level, set_number)):
                        match_count += 1

                # Sanity check: Tiebreakers must be played after at least 3 matches, or 6 for finals
                if match_count < 3 or (match_count < 6 and comp_level == 'f'):
                    logging.warning("Match supposedly tied, but existing count is {}! Skipping match.".format(match_count))
                    continue

                match_number = match_count + 1
                new_key_name = Match.renderKeyName(
                    event_key,
                    comp_level,
                    set_number,
                    match_number)
                remapped_matches[key_name] = new_key_name
                key_name = new_key_name

                # Point existing match to new tiebreaker match
                existing_match.tiebreak_match_key = ndb.Key(Match, key_name)
                parsed_matches.append(existing_match)

                logging.warning("Creating new match: {}".format(key_name))
            elif existing_match:
                remapped_matches[key_name] = existing_match.key.id()
                key_name = existing_match.key.id()
                match_number = existing_match.match_number

            parsed_matches.append(Match(
                id=key_name,
                event=event.key,
                year=event.year,
                set_number=set_number,
                match_number=match_number,
                comp_level=comp_level,
                team_key_names=team_key_names,
                time=time,
                actual_time=actual_time,
                post_result_time=post_result_time,
                alliances_json=json.dumps(alliances),
            ))

        if self.year == 2015:
            # Fix null teams in elims (due to FMS API failure, some info not complete)
            # Should only happen for sf and f matches
            organized_matches = MatchHelper.organizeMatches(parsed_matches)
            for level in ['sf', 'f']:
                playoff_advancement = MatchHelper.generatePlayoffAdvancement2015(organized_matches)
                if playoff_advancement[LAST_LEVEL[level]] != []:
                    for match in organized_matches[level]:
                        if 'frcNone' in match.team_key_names:
                            if level == 'sf':
                                red_seed, blue_seed = QF_SF_MAP[match.match_number]
                            else:
                                red_seed = 0
                                blue_seed = 1
                            red_teams = ['frc{}'.format(t) for t in playoff_advancement[LAST_LEVEL[level]][red_seed][0]]
                            blue_teams = ['frc{}'.format(t) for t in playoff_advancement[LAST_LEVEL[level]][blue_seed][0]]

                            alliances = match.alliances
                            alliances['red']['teams'] = red_teams
                            alliances['blue']['teams'] = blue_teams
                            match.alliances_json = json.dumps(alliances)
                            match.team_key_names = red_teams + blue_teams

            fixed_matches = []
            for key, matches in organized_matches.items():
                if key != 'num':
                    for match in matches:
                        if 'frcNone' not in match.team_key_names:
                            fixed_matches.append(match)
            parsed_matches = fixed_matches

        return parsed_matches, remapped_matches


class FMSAPIMatchDetailsParser(object):
    def __init__(self, year, event_short):
        self.year = year
        self.event_short = event_short

    def parse(self, response):
        matches = response['MatchScores']

        match_details_by_key = {}

        is_octofinals = len(matches) > 0 and matches[len(matches) - 1]['matchNumber'] > 23  # Hacky; this should be 24. Banking on the fact that 3 tiebreakers is rare
        for match in matches:
            comp_level = get_comp_level(self.year, match['matchLevel'], match['matchNumber'], is_octofinals)
            set_number, match_number = get_set_match_number(self.year, comp_level, match['matchNumber'], is_octofinals)
            breakdown = {
                'red': {},
                'blue': {},
            }
            if 'coopertition' in match:
                breakdown['coopertition'] = match['coopertition']
            if 'coopertitionPoints' in match:
                breakdown['coopertition_points'] = match['coopertitionPoints']
            for alliance in match['Alliances']:
                color = alliance['alliance'].lower()
                for key, value in alliance.items():
                    if key != 'alliance':
                        breakdown[color][key] = value

            match_details_by_key[Match.renderKeyName(
                '{}{}'.format(self.year, self.event_short),
                comp_level,
                set_number,
                match_number)] = breakdown

        return match_details_by_key
[ "import datetime\n", "import json\n", "import logging\n", "import pytz\n", "\n", "from google.appengine.ext import ndb\n", "\n", "from helpers.match_helper import MatchHelper\n", "from models.event import Event\n", "from models.match import Match\n", "\n", "QF_SF_MAP = {\n", " 1: (1, 3), # in sf1, qf seeds 2 and 4 play. 0-indexed becomes 1, 3\n", " 2: (0, 2),\n", " 3: (1, 2),\n", " 4: (0, 3),\n", " 5: (2, 3),\n", " 6: (0, 1)\n", "}\n", "\n", "LAST_LEVEL = {\n", " 'sf': 'qf',\n", " 'f': 'sf'\n", "}\n", "\n", "TIME_PATTERN = \"%Y-%m-%dT%H:%M:%S\"\n", "\n", "ELIM_MAPPING = {\n", " 1: (1, 1), # (set, match)\n", " 2: (2, 1),\n", " 3: (3, 1),\n", " 4: (4, 1),\n", " 5: (1, 2),\n", " 6: (2, 2),\n", " 7: (3, 2),\n", " 8: (4, 2),\n", " 9: (1, 3),\n", " 10: (2, 3),\n", " 11: (3, 3),\n", " 12: (4, 3),\n", " 13: (1, 1),\n", " 14: (2, 1),\n", " 15: (1, 2),\n", " 16: (2, 2),\n", " 17: (1, 3),\n", " 18: (2, 3),\n", " 19: (1, 1),\n", " 20: (1, 2),\n", " 21: (1, 3),\n", " 22: (1, 4),\n", " 23: (1, 5),\n", " 24: (1, 6),\n", "}\n", "\n", "OCTO_ELIM_MAPPING = {\n", " # octofinals\n", " 1: (1, 1), # (set, match)\n", " 2: (2, 1),\n", " 3: (3, 1),\n", " 4: (4, 1),\n", " 5: (5, 1),\n", " 6: (6, 1),\n", " 7: (7, 1),\n", " 8: (8, 1),\n", " 9: (1, 2),\n", " 10: (2, 2),\n", " 11: (3, 2),\n", " 12: (4, 2),\n", " 13: (5, 2),\n", " 14: (6, 2),\n", " 15: (7, 2),\n", " 16: (8, 2),\n", " 17: (1, 3),\n", " 18: (2, 3),\n", " 19: (3, 3),\n", " 20: (4, 3),\n", " 21: (5, 3),\n", " 22: (6, 3),\n", " 23: (7, 3),\n", " 24: (8, 3),\n", "\n", " # quarterfinals\n", " 25: (1, 1),\n", " 26: (2, 1),\n", " 27: (3, 1),\n", " 28: (4, 1),\n", " 29: (1, 2),\n", " 30: (2, 2),\n", " 31: (3, 2),\n", " 32: (4, 2),\n", " 33: (1, 3),\n", " 34: (2, 3),\n", " 35: (3, 3),\n", " 36: (4, 3),\n", "\n", " # semifinals\n", " 37: (1, 1),\n", " 38: (2, 1),\n", " 39: (1, 2),\n", " 40: (2, 2),\n", " 41: (1, 3),\n", " 42: (2, 3),\n", "\n", " # finals\n", " 43: (1, 1),\n", " 44: (1, 2),\n", " 45: (1, 3),\n", " 46: (1, 4),\n", " 47: (1, 5),\n", " 48: (1, 6),\n", "}\n", "\n", "\n", "def get_comp_level(year, match_level, match_number, is_octofinals):\n", " if match_level == 'Qualification':\n", " return 'qm'\n", " else:\n", " if year == 2015:\n", " if match_number <= 8:\n", " return 'qf'\n", " elif match_number <= 14:\n", " return 'sf'\n", " else:\n", " return 'f'\n", " else:\n", " if is_octofinals:\n", " return get_comp_level_octo(year, match_number)\n", " if match_number <= 12:\n", " return 'qf'\n", " elif match_number <= 18:\n", " return 'sf'\n", " else:\n", " return 'f'\n", "\n", "\n", "def get_comp_level_octo(year, match_number):\n", " \"\"\" No 2015 support \"\"\"\n", " if match_number <= 24:\n", " return 'ef'\n", " elif match_number <= 36:\n", " return 'qf'\n", " elif match_number <= 42:\n", " return 'sf'\n", " else:\n", " return 'f'\n", "\n", "\n", "def get_set_match_number(year, comp_level, match_number, is_octofinals):\n", " if year == 2015:\n", " if comp_level == 'sf':\n", " return 1, match_number - 8\n", " elif comp_level == 'f':\n", " return 1, match_number - 14\n", " else: # qm, qf\n", " return 1, match_number\n", " else:\n", " if comp_level in {'ef', 'qf', 'sf', 'f'}:\n", " return OCTO_ELIM_MAPPING[match_number] if is_octofinals else ELIM_MAPPING[match_number]\n", " else: # qm\n", " return 1, match_number\n", "\n", "\n", "class FMSAPIHybridScheduleParser(object):\n", "\n", " def __init__(self, year, event_short):\n", " self.year = year\n", " self.event_short = event_short\n", "\n", " @classmethod\n", " def is_blank_match(cls, match):\n", " 
\"\"\"\n", " Detect junk playoff matches like in 2017scmb\n", " \"\"\"\n", " if match.comp_level == 'qm' or not match.score_breakdown:\n", " return False\n", " for color in ['red', 'blue']:\n", " if match.alliances[color]['score'] != 0:\n", " return False\n", " for value in match.score_breakdown[color].values():\n", " if value and value not in {'Unknown', 'None'}: # Nonzero, False, blank, None, etc.\n", " return False\n", " return True\n", "\n", " def parse(self, response):\n", " matches = response['Schedule']\n", "\n", " event_key = '{}{}'.format(self.year, self.event_short)\n", " event = Event.get_by_id(event_key)\n", " if event.timezone_id:\n", " event_tz = pytz.timezone(event.timezone_id)\n", " else:\n", " logging.warning(\"Event {} has no timezone! Match times may be wrong.\".format(event_key))\n", " event_tz = None\n", "\n", " parsed_matches = []\n", " remapped_matches = {} # If a key changes due to a tiebreaker\n", " is_octofinals = len(matches) > 0 and 'Octofinal' in matches[0]['description']\n", " for match in matches:\n", " if 'tournamentLevel' in match: # 2016+\n", " level = match['tournamentLevel']\n", " else: # 2015\n", " level = match['level']\n", " comp_level = get_comp_level(self.year, level, match['matchNumber'], is_octofinals)\n", " set_number, match_number = get_set_match_number(self.year, comp_level, match['matchNumber'], is_octofinals)\n", "\n", " red_teams = []\n", " blue_teams = []\n", " red_surrogates = []\n", " blue_surrogates = []\n", " team_key_names = []\n", " null_team = False\n", " sorted_teams = sorted(match['Teams'], key=lambda team: team['station']) # Sort by station to ensure correct ordering. Kind of hacky.\n", " for team in sorted_teams:\n", " if team['teamNumber'] is None:\n", " null_team = True\n", " team_key = 'frc{}'.format(team['teamNumber'])\n", " team_key_names.append(team_key)\n", " if 'Red' in team['station']:\n", " red_teams.append(team_key)\n", " if team['surrogate']:\n", " red_surrogates.append(team_key)\n", " elif 'Blue' in team['station']:\n", " blue_teams.append(team_key)\n", " if team['surrogate']:\n", " blue_surrogates.append(team_key)\n", "\n", " if null_team and match['scoreRedFinal'] is None and match['scoreBlueFinal'] is None:\n", " continue\n", "\n", " alliances = {\n", " 'red': {\n", " 'teams': red_teams,\n", " 'surrogates': red_surrogates,\n", " 'score': match['scoreRedFinal']\n", " },\n", " 'blue': {\n", " 'teams': blue_teams,\n", " 'surrogates': blue_surrogates,\n", " 'score': match['scoreBlueFinal']\n", " },\n", " }\n", "\n", " if not match['startTime']: # no startTime means it's an unneeded rubber match\n", " continue\n", "\n", " time = datetime.datetime.strptime(match['startTime'].split('.')[0], TIME_PATTERN)\n", " if event_tz is not None:\n", " time = time - event_tz.utcoffset(time)\n", "\n", " actual_time_raw = match['actualStartTime'] if 'actualStartTime' in match else None\n", " actual_time = None\n", " if actual_time_raw is not None:\n", " actual_time = datetime.datetime.strptime(actual_time_raw.split('.')[0], TIME_PATTERN)\n", " if event_tz is not None:\n", " actual_time = actual_time - event_tz.utcoffset(actual_time)\n", "\n", " post_result_time_raw = match.get('postResultTime')\n", " post_result_time = None\n", " if post_result_time_raw is not None:\n", " post_result_time = datetime.datetime.strptime(post_result_time_raw.split('.')[0], TIME_PATTERN)\n", " if event_tz is not None:\n", " post_result_time = post_result_time - event_tz.utcoffset(post_result_time)\n", "\n", " key_name = Match.renderKeyName(\n", " 
event_key,\n", " comp_level,\n", " set_number,\n", " match_number)\n", "\n", " # Check for tiebreaker matches\n", " existing_match = Match.get_by_id(key_name)\n", " # Follow chain of existing matches\n", " while existing_match is not None and existing_match.tiebreak_match_key is not None:\n", " logging.info(\"Following Match {} to {}\".format(existing_match.key.id(), existing_match.tiebreak_match_key.id()))\n", " existing_match = existing_match.tiebreak_match_key.get()\n", " # Check if last existing match needs to be tiebroken\n", " if existing_match and existing_match.comp_level != 'qm' and \\\n", " existing_match.has_been_played and \\\n", " existing_match.winning_alliance == '' and \\\n", " existing_match.actual_time != actual_time and \\\n", " not self.is_blank_match(existing_match):\n", " logging.warning(\"Match {} is tied!\".format(existing_match.key.id()))\n", "\n", " # TODO: Only query within set if set_number ever gets indexed\n", " match_count = 0\n", " for match_key in Match.query(Match.event==event.key, Match.comp_level==comp_level).fetch(keys_only=True):\n", " _, match_key = match_key.id().split('_')\n", " if match_key.startswith('{}{}'.format(comp_level, set_number)):\n", " match_count += 1\n", "\n", " # Sanity check: Tiebreakers must be played after at least 3 matches, or 6 for finals\n", " if match_count < 3 or (match_count < 6 and comp_level == 'f'):\n", " logging.warning(\"Match supposedly tied, but existing count is {}! Skipping match.\".format(match_count))\n", " continue\n", "\n", " match_number = match_count + 1\n", " new_key_name = Match.renderKeyName(\n", " event_key,\n", " comp_level,\n", " set_number,\n", " match_number)\n", " remapped_matches[key_name] = new_key_name\n", " key_name = new_key_name\n", "\n", " # Point existing match to new tiebreaker match\n", " existing_match.tiebreak_match_key = ndb.Key(Match, key_name)\n", " parsed_matches.append(existing_match)\n", "\n", " logging.warning(\"Creating new match: {}\".format(key_name))\n", " elif existing_match:\n", " remapped_matches[key_name] = existing_match.key.id()\n", " key_name = existing_match.key.id()\n", " match_number = existing_match.match_number\n", "\n", " parsed_matches.append(Match(\n", " id=key_name,\n", " event=event.key,\n", " year=event.year,\n", " set_number=set_number,\n", " match_number=match_number,\n", " comp_level=comp_level,\n", " team_key_names=team_key_names,\n", " time=time,\n", " actual_time=actual_time,\n", " post_result_time=post_result_time,\n", " alliances_json=json.dumps(alliances),\n", " ))\n", "\n", " if self.year == 2015:\n", " # Fix null teams in elims (due to FMS API failure, some info not complete)\n", " # Should only happen for sf and f matches\n", " organized_matches = MatchHelper.organizeMatches(parsed_matches)\n", " for level in ['sf', 'f']:\n", " playoff_advancement = MatchHelper.generatePlayoffAdvancement2015(organized_matches)\n", " if playoff_advancement[LAST_LEVEL[level]] != []:\n", " for match in organized_matches[level]:\n", " if 'frcNone' in match.team_key_names:\n", " if level == 'sf':\n", " red_seed, blue_seed = QF_SF_MAP[match.match_number]\n", " else:\n", " red_seed = 0\n", " blue_seed = 1\n", " red_teams = ['frc{}'.format(t) for t in playoff_advancement[LAST_LEVEL[level]][red_seed][0]]\n", " blue_teams = ['frc{}'.format(t) for t in playoff_advancement[LAST_LEVEL[level]][blue_seed][0]]\n", "\n", " alliances = match.alliances\n", " alliances['red']['teams'] = red_teams\n", " alliances['blue']['teams'] = blue_teams\n", " match.alliances_json = 
json.dumps(alliances)\n", " match.team_key_names = red_teams + blue_teams\n", "\n", " fixed_matches = []\n", " for key, matches in organized_matches.items():\n", " if key != 'num':\n", " for match in matches:\n", " if 'frcNone' not in match.team_key_names:\n", " fixed_matches.append(match)\n", " parsed_matches = fixed_matches\n", "\n", " return parsed_matches, remapped_matches\n", "\n", "\n", "class FMSAPIMatchDetailsParser(object):\n", " def __init__(self, year, event_short):\n", " self.year = year\n", " self.event_short = event_short\n", "\n", " def parse(self, response):\n", " matches = response['MatchScores']\n", "\n", " match_details_by_key = {}\n", "\n", " is_octofinals = len(matches) > 0 and matches[len(matches) - 1]['matchNumber'] > 23 # Hacky; this should be 24. Banking on the fact that 3 tiebreakers is rare\n", " for match in matches:\n", " comp_level = get_comp_level(self.year, match['matchLevel'], match['matchNumber'], is_octofinals)\n", " set_number, match_number = get_set_match_number(self.year, comp_level, match['matchNumber'], is_octofinals)\n", " breakdown = {\n", " 'red': {},\n", " 'blue': {},\n", " }\n", " if 'coopertition' in match:\n", " breakdown['coopertition'] = match['coopertition']\n", " if 'coopertitionPoints' in match:\n", " breakdown['coopertition_points'] = match['coopertitionPoints']\n", " for alliance in match['Alliances']:\n", " color = alliance['alliance'].lower()\n", " for key, value in alliance.items():\n", " if key != 'alliance':\n", " breakdown[color][key] = value\n", "\n", " match_details_by_key[Match.renderKeyName(\n", " '{}{}'.format(self.year, self.event_short),\n", " comp_level,\n", " set_number,\n", " match_number)] = breakdown\n", "\n", " return match_details_by_key\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.019801980198019802, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009900990099009901, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0.010526315789473684, 0.008333333333333333, 0, 0, 0, 0, 0, 0, 0, 0.00684931506849315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0.010638297872340425, 0, 0, 0, 0.010526315789473684, 0, 0, 0.00980392156862745, 0, 0, 0, 0, 0, 0, 0.008928571428571428, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0.007751937984496124, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0.02459016393442623, 0, 0.011904761904761904, 0, 0, 0.009900990099009901, 0, 0.008064516129032258, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0.01, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0.008264462809917356, 0.008130081300813009, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.005988023952095809, 0, 0.009174311926605505, 0.008333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
397
0.000772
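Each record in this dump pairs a raw source file (org_text) with its line-by-line split (texts) and a per-line score sequence (scores); the num_lines and avg_score values appear to be simple aggregates of those sequences. For the record above, the 397 listed scores average to roughly 0.000772, which matches the printed avg_score. The sketch below recomputes both fields from a record dict; the field names follow the schema at the top of this dump, and the mean-of-scores aggregation is an assumption inferred from the printed values rather than a documented definition.

def derived_fields(record):
    # Minimal sketch, assuming a record is a dict holding the "texts" and
    # "scores" sequences shown in this dump, aligned one score per source line.
    scores = record["scores"]
    num_lines = len(record["texts"])
    assert num_lines == len(scores), "texts and scores should be line-aligned"
    # Assumption: avg_score is the arithmetic mean of the per-line scores.
    avg_score = sum(scores) / num_lines if num_lines else 0.0
    return num_lines, avg_score

Applied to the record above, this returns 397 and approximately 0.000772, and the same relationship holds for the other records shown below (e.g. 74 lines averaging to 0.004202, 52 lines averaging to 0.00077).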
# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.remote import create_guest_client from trove.common import utils from trove.db import get_db_api from trove.guestagent.db import models as guest_models from trove.instance import models as base_models CONF = cfg.CONF LOG = logging.getLogger(__name__) def load_and_verify(context, instance_id): # Load InstanceServiceStatus to verify if its running instance = base_models.Instance.load(context, instance_id) if not instance.is_datastore_running: raise exception.UnprocessableEntity( "Instance %s is not ready." % instance.id) else: return instance class Root(object): @classmethod def load(cls, context, instance_id): load_and_verify(context, instance_id) # TODO(pdmars): remove the is_root_enabled call from the guest agent, # just check the database for this information. # If the root history returns null or raises an exception, the root # user hasn't been enabled. try: root_history = RootHistory.load(context, instance_id) except exception.NotFound: return False if not root_history: return False return True @classmethod def create(cls, context, instance_id, user, root_password, cluster_instances_list=None): load_and_verify(context, instance_id) if root_password: root = create_guest_client(context, instance_id).enable_root_with_password( root_password) else: root = create_guest_client(context, instance_id).enable_root() root_user = guest_models.RootUser() root_user.deserialize(root) # if cluster_instances_list none, then root create is called for # single instance, adding an RootHistory entry for the instance_id if cluster_instances_list is None: RootHistory.create(context, instance_id, user) return root_user @classmethod def delete(cls, context, instance_id): load_and_verify(context, instance_id) create_guest_client(context, instance_id).disable_root() class ClusterRoot(Root): @classmethod def create(cls, context, instance_id, user, root_password, cluster_instances_list=None): root_user = super(ClusterRoot, cls).create(context, instance_id, user, root_password, cluster_instances_list=None) if cluster_instances_list: for instance in cluster_instances_list: RootHistory.create(context, instance, user) return root_user class RootHistory(object): _auto_generated_attrs = ['id'] _data_fields = ['instance_id', 'user', 'created'] _table_name = 'root_enabled_history' def __init__(self, instance_id, user): self.id = instance_id self.user = user self.created = utils.utcnow() def save(self): LOG.debug("Saving %(name)s: %(dict)s" % {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) @classmethod def load(cls, context, instance_id): history = get_db_api().find_by(cls, id=instance_id) return history @classmethod def create(cls, context, instance_id, user): history = cls.load(context, instance_id) if history is not None: return history history = 
RootHistory(instance_id, user) return history.save()
[ "# Copyright [2015] Hewlett-Packard Development Company, L.P.\n", "# All Rights Reserved.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n", "# not use this file except in compliance with the License. You may obtain\n", "# a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n", "# License for the specific language governing permissions and limitations\n", "# under the License.\n", "\n", "from oslo_log import log as logging\n", "\n", "from trove.common import cfg\n", "from trove.common import exception\n", "from trove.common.remote import create_guest_client\n", "from trove.common import utils\n", "from trove.db import get_db_api\n", "from trove.guestagent.db import models as guest_models\n", "from trove.instance import models as base_models\n", "\n", "\n", "CONF = cfg.CONF\n", "LOG = logging.getLogger(__name__)\n", "\n", "\n", "def load_and_verify(context, instance_id):\n", " # Load InstanceServiceStatus to verify if its running\n", " instance = base_models.Instance.load(context, instance_id)\n", " if not instance.is_datastore_running:\n", " raise exception.UnprocessableEntity(\n", " \"Instance %s is not ready.\" % instance.id)\n", " else:\n", " return instance\n", "\n", "\n", "class Root(object):\n", "\n", " @classmethod\n", " def load(cls, context, instance_id):\n", " load_and_verify(context, instance_id)\n", " # TODO(pdmars): remove the is_root_enabled call from the guest agent,\n", " # just check the database for this information.\n", " # If the root history returns null or raises an exception, the root\n", " # user hasn't been enabled.\n", " try:\n", " root_history = RootHistory.load(context, instance_id)\n", " except exception.NotFound:\n", " return False\n", " if not root_history:\n", " return False\n", " return True\n", "\n", " @classmethod\n", " def create(cls, context, instance_id, user, root_password,\n", " cluster_instances_list=None):\n", " load_and_verify(context, instance_id)\n", " if root_password:\n", " root = create_guest_client(context,\n", " instance_id).enable_root_with_password(\n", " root_password)\n", " else:\n", " root = create_guest_client(context, instance_id).enable_root()\n", "\n", " root_user = guest_models.RootUser()\n", " root_user.deserialize(root)\n", "\n", " # if cluster_instances_list none, then root create is called for\n", " # single instance, adding an RootHistory entry for the instance_id\n", " if cluster_instances_list is None:\n", " RootHistory.create(context, instance_id, user)\n", "\n", " return root_user\n", "\n", " @classmethod\n", " def delete(cls, context, instance_id):\n", " load_and_verify(context, instance_id)\n", " create_guest_client(context, instance_id).disable_root()\n", "\n", "\n", "class ClusterRoot(Root):\n", "\n", " @classmethod\n", " def create(cls, context, instance_id, user, root_password,\n", " cluster_instances_list=None):\n", " root_user = super(ClusterRoot, cls).create(context, instance_id,\n", " user, root_password,\n", " cluster_instances_list=None)\n", "\n", " if cluster_instances_list:\n", " for instance in cluster_instances_list:\n", " RootHistory.create(context, instance, user)\n", "\n", " return root_user\n", "\n", "\n", "class RootHistory(object):\n", "\n", " _auto_generated_attrs = ['id']\n", " 
_data_fields = ['instance_id', 'user', 'created']\n", " _table_name = 'root_enabled_history'\n", "\n", " def __init__(self, instance_id, user):\n", " self.id = instance_id\n", " self.user = user\n", " self.created = utils.utcnow()\n", "\n", " def save(self):\n", " LOG.debug(\"Saving %(name)s: %(dict)s\" %\n", " {'name': self.__class__.__name__, 'dict': self.__dict__})\n", " return get_db_api().save(self)\n", "\n", " @classmethod\n", " def load(cls, context, instance_id):\n", " history = get_db_api().find_by(cls, id=instance_id)\n", " return history\n", "\n", " @classmethod\n", " def create(cls, context, instance_id, user):\n", " history = cls.load(context, instance_id)\n", " if history is not None:\n", " return history\n", " history = RootHistory(instance_id, user)\n", " return history.save()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
128
0
# Copyright 2018-2020 by Christopher C. Little. # This file is part of Abydos. # # Abydos is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Abydos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Abydos. If not, see <http://www.gnu.org/licenses/>. """abydos.distance._minkowski. Minkowski distance & similarity """ from typing import ( Any, Counter as TCounter, Optional, Sequence, Set, Union, cast, ) from ._token_distance import _TokenDistance from ..tokenizer import _Tokenizer __all__ = ['Minkowski'] class Minkowski(_TokenDistance): """Minkowski distance. The Minkowski distance :cite:`Minkowski:1910` is a distance metric in :math:`L^p-space`. .. versionadded:: 0.3.6 """ def __init__( self, pval: float = 1, alphabet: Optional[ Union[TCounter[str], Sequence[str], Set[str], int] ] = 0, tokenizer: Optional[_Tokenizer] = None, intersection_type: str = 'crisp', **kwargs: Any ) -> None: """Initialize Euclidean instance. Parameters ---------- pval : int The :math:`p`-value of the :math:`L^p`-space alphabet : collection or int The values or size of the alphabet tokenizer : _Tokenizer A tokenizer instance from the :py:mod:`abydos.tokenizer` package intersection_type : str Specifies the intersection type, and set type as a result: See :ref:`intersection_type <intersection_type>` description in :py:class:`_TokenDistance` for details. **kwargs Arbitrary keyword arguments Other Parameters ---------------- qval : int The length of each q-gram. Using this parameter and tokenizer=None will cause the instance to use the QGram tokenizer with this q value. metric : _Distance A string distance measure class for use in the ``soft`` and ``fuzzy`` variants. threshold : float A threshold value, similarities above which are counted as members of the intersection for the ``fuzzy`` variant. .. versionadded:: 0.4.0 """ super(Minkowski, self).__init__( tokenizer=tokenizer, alphabet=alphabet, intersection_type=intersection_type, **kwargs ) self.set_params(pval=pval) def dist_abs(self, src: str, tar: str, normalized: bool = False) -> float: """Return the Minkowski distance (:math:`L^p`-norm) of two strings. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison normalized : bool Normalizes to [0, 1] if True Returns ------- float The Minkowski distance Examples -------- >>> cmp = Minkowski() >>> cmp.dist_abs('cat', 'hat') 4.0 >>> cmp.dist_abs('Niall', 'Neil') 7.0 >>> cmp.dist_abs('Colin', 'Cuilen') 9.0 >>> cmp.dist_abs('ATCG', 'TAGC') 10.0 .. versionadded:: 0.3.0 .. 
versionchanged:: 0.3.6 Encapsulated in class """ self._tokenize(src, tar) diffs = self._symmetric_difference().values() normalizer = 1 if normalized: totals = self._total().values() if self.params['alphabet']: normalizer = self.params['alphabet'] elif self.params['pval'] == 0: normalizer = len(totals) else: normalizer = sum(_ ** self.params['pval'] for _ in totals) ** ( 1 / self.params['pval'] ) if len(diffs) == 0: return 0.0 if self.params['pval'] == float('inf'): # Chebyshev distance return max(diffs) / normalizer if self.params['pval'] == 0: # This is the l_0 "norm" as developed by David Donoho return sum(_ != 0 for _ in diffs) / normalizer return cast( float, sum(_ ** self.params['pval'] for _ in diffs) ** (1 / self.params['pval']) / normalizer, ) def dist(self, src: str, tar: str) -> float: """Return normalized Minkowski distance of two strings. The normalized Minkowski distance :cite:`Minkowski:1910` is a distance metric in :math:`L^p`-space, normalized to [0, 1]. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison Returns ------- float The normalized Minkowski distance Examples -------- >>> cmp = Minkowski() >>> cmp.dist('cat', 'hat') 0.5 >>> round(cmp.dist('Niall', 'Neil'), 12) 0.636363636364 >>> round(cmp.dist('Colin', 'Cuilen'), 12) 0.692307692308 >>> cmp.dist('ATCG', 'TAGC') 1.0 .. versionadded:: 0.3.0 .. versionchanged:: 0.3.6 Encapsulated in class """ return self.dist_abs(src, tar, normalized=True) if __name__ == '__main__': import doctest doctest.testmod()
[ "# Copyright 2018-2020 by Christopher C. Little.\n", "# This file is part of Abydos.\n", "#\n", "# Abydos is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU General Public License as published by\n", "# the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# Abydos is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU General Public License\n", "# along with Abydos. If not, see <http://www.gnu.org/licenses/>.\n", "\n", "\"\"\"abydos.distance._minkowski.\n", "\n", "Minkowski distance & similarity\n", "\"\"\"\n", "\n", "from typing import (\n", " Any,\n", " Counter as TCounter,\n", " Optional,\n", " Sequence,\n", " Set,\n", " Union,\n", " cast,\n", ")\n", "\n", "from ._token_distance import _TokenDistance\n", "from ..tokenizer import _Tokenizer\n", "\n", "__all__ = ['Minkowski']\n", "\n", "\n", "class Minkowski(_TokenDistance):\n", " \"\"\"Minkowski distance.\n", "\n", " The Minkowski distance :cite:`Minkowski:1910` is a distance metric in\n", " :math:`L^p-space`.\n", "\n", " .. versionadded:: 0.3.6\n", " \"\"\"\n", "\n", " def __init__(\n", " self,\n", " pval: float = 1,\n", " alphabet: Optional[\n", " Union[TCounter[str], Sequence[str], Set[str], int]\n", " ] = 0,\n", " tokenizer: Optional[_Tokenizer] = None,\n", " intersection_type: str = 'crisp',\n", " **kwargs: Any\n", " ) -> None:\n", " \"\"\"Initialize Euclidean instance.\n", "\n", " Parameters\n", " ----------\n", " pval : int\n", " The :math:`p`-value of the :math:`L^p`-space\n", " alphabet : collection or int\n", " The values or size of the alphabet\n", " tokenizer : _Tokenizer\n", " A tokenizer instance from the :py:mod:`abydos.tokenizer` package\n", " intersection_type : str\n", " Specifies the intersection type, and set type as a result:\n", " See :ref:`intersection_type <intersection_type>` description in\n", " :py:class:`_TokenDistance` for details.\n", " **kwargs\n", " Arbitrary keyword arguments\n", "\n", " Other Parameters\n", " ----------------\n", " qval : int\n", " The length of each q-gram. Using this parameter and tokenizer=None\n", " will cause the instance to use the QGram tokenizer with this\n", " q value.\n", " metric : _Distance\n", " A string distance measure class for use in the ``soft`` and\n", " ``fuzzy`` variants.\n", " threshold : float\n", " A threshold value, similarities above which are counted as\n", " members of the intersection for the ``fuzzy`` variant.\n", "\n", "\n", " .. 
versionadded:: 0.4.0\n", "\n", " \"\"\"\n", " super(Minkowski, self).__init__(\n", " tokenizer=tokenizer,\n", " alphabet=alphabet,\n", " intersection_type=intersection_type,\n", " **kwargs\n", " )\n", " self.set_params(pval=pval)\n", "\n", " def dist_abs(self, src: str, tar: str, normalized: bool = False) -> float:\n", " \"\"\"Return the Minkowski distance (:math:`L^p`-norm) of two strings.\n", "\n", " Parameters\n", " ----------\n", " src : str\n", " Source string (or QGrams/Counter objects) for comparison\n", " tar : str\n", " Target string (or QGrams/Counter objects) for comparison\n", " normalized : bool\n", " Normalizes to [0, 1] if True\n", "\n", " Returns\n", " -------\n", " float\n", " The Minkowski distance\n", "\n", " Examples\n", " --------\n", " >>> cmp = Minkowski()\n", " >>> cmp.dist_abs('cat', 'hat')\n", " 4.0\n", " >>> cmp.dist_abs('Niall', 'Neil')\n", " 7.0\n", " >>> cmp.dist_abs('Colin', 'Cuilen')\n", " 9.0\n", " >>> cmp.dist_abs('ATCG', 'TAGC')\n", " 10.0\n", "\n", "\n", " .. versionadded:: 0.3.0\n", " .. versionchanged:: 0.3.6\n", " Encapsulated in class\n", "\n", " \"\"\"\n", " self._tokenize(src, tar)\n", " diffs = self._symmetric_difference().values()\n", "\n", " normalizer = 1\n", " if normalized:\n", " totals = self._total().values()\n", " if self.params['alphabet']:\n", " normalizer = self.params['alphabet']\n", " elif self.params['pval'] == 0:\n", " normalizer = len(totals)\n", " else:\n", " normalizer = sum(_ ** self.params['pval'] for _ in totals) ** (\n", " 1 / self.params['pval']\n", " )\n", "\n", " if len(diffs) == 0:\n", " return 0.0\n", " if self.params['pval'] == float('inf'):\n", " # Chebyshev distance\n", " return max(diffs) / normalizer\n", " if self.params['pval'] == 0:\n", " # This is the l_0 \"norm\" as developed by David Donoho\n", " return sum(_ != 0 for _ in diffs) / normalizer\n", " return cast(\n", " float,\n", " sum(_ ** self.params['pval'] for _ in diffs)\n", " ** (1 / self.params['pval'])\n", " / normalizer,\n", " )\n", "\n", " def dist(self, src: str, tar: str) -> float:\n", " \"\"\"Return normalized Minkowski distance of two strings.\n", "\n", " The normalized Minkowski distance :cite:`Minkowski:1910` is a distance\n", " metric in :math:`L^p`-space, normalized to [0, 1].\n", "\n", " Parameters\n", " ----------\n", " src : str\n", " Source string (or QGrams/Counter objects) for comparison\n", " tar : str\n", " Target string (or QGrams/Counter objects) for comparison\n", "\n", " Returns\n", " -------\n", " float\n", " The normalized Minkowski distance\n", "\n", " Examples\n", " --------\n", " >>> cmp = Minkowski()\n", " >>> cmp.dist('cat', 'hat')\n", " 0.5\n", " >>> round(cmp.dist('Niall', 'Neil'), 12)\n", " 0.636363636364\n", " >>> round(cmp.dist('Colin', 'Cuilen'), 12)\n", " 0.692307692308\n", " >>> cmp.dist('ATCG', 'TAGC')\n", " 1.0\n", "\n", "\n", " .. versionadded:: 0.3.0\n", " .. versionchanged:: 0.3.6\n", " Encapsulated in class\n", "\n", " \"\"\"\n", " return self.dist_abs(src, tar, normalized=True)\n", "\n", "\n", "if __name__ == '__main__':\n", " import doctest\n", "\n", " doctest.testmod()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
206
0
# -*- coding: utf-8 -*- """ pygments.console ~~~~~~~~~~~~~~~~ Format colored console output. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ esc = "\x1b[" codes = {} codes[""] = "" codes["reset"] = esc + "39;49;00m" codes["bold"] = esc + "01m" codes["faint"] = esc + "02m" codes["standout"] = esc + "03m" codes["underline"] = esc + "04m" codes["blink"] = esc + "05m" codes["overline"] = esc + "06m" dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue", "purple", "teal", "lightgray"] light_colors = ["darkgray", "red", "green", "yellow", "blue", "fuchsia", "turquoise", "white"] x = 30 for d, l in zip(dark_colors, light_colors): codes[d] = esc + "%im" % x codes[l] = esc + "%i;01m" % x x += 1 del d, l, x codes["darkteal"] = codes["turquoise"] codes["darkyellow"] = codes["brown"] codes["fuscia"] = codes["fuchsia"] codes["white"] = codes["bold"] def reset_color(): return codes["reset"] def colorize(color_key, text): return codes[color_key] + text + codes["reset"] def ansiformat(attr, text): """ Format ``text`` with a color and/or some attributes:: color normal color *color* bold color _color_ underlined color +color+ blinking color """ result = [] if attr[:1] == attr[-1:] == '+': result.append(codes['blink']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '*': result.append(codes['bold']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '_': result.append(codes['underline']) attr = attr[1:-1] result.append(codes[attr]) result.append(text) result.append(codes['reset']) return ''.join(result)
[ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", " pygments.console\n", " ~~~~~~~~~~~~~~~~\n", "\n", " Format colored console output.\n", "\n", " :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.\n", " :license: BSD, see LICENSE for details.\n", "\"\"\"\n", "\n", "esc = \"\\x1b[\"\n", "\n", "codes = {}\n", "codes[\"\"] = \"\"\n", "codes[\"reset\"] = esc + \"39;49;00m\"\n", "\n", "codes[\"bold\"] = esc + \"01m\"\n", "codes[\"faint\"] = esc + \"02m\"\n", "codes[\"standout\"] = esc + \"03m\"\n", "codes[\"underline\"] = esc + \"04m\"\n", "codes[\"blink\"] = esc + \"05m\"\n", "codes[\"overline\"] = esc + \"06m\"\n", "\n", "dark_colors = [\"black\", \"darkred\", \"darkgreen\", \"brown\", \"darkblue\",\n", " \"purple\", \"teal\", \"lightgray\"]\n", "light_colors = [\"darkgray\", \"red\", \"green\", \"yellow\", \"blue\",\n", " \"fuchsia\", \"turquoise\", \"white\"]\n", "\n", "x = 30\n", "for d, l in zip(dark_colors, light_colors):\n", " codes[d] = esc + \"%im\" % x\n", " codes[l] = esc + \"%i;01m\" % x\n", " x += 1\n", "\n", "del d, l, x\n", "\n", "codes[\"darkteal\"] = codes[\"turquoise\"]\n", "codes[\"darkyellow\"] = codes[\"brown\"]\n", "codes[\"fuscia\"] = codes[\"fuchsia\"]\n", "codes[\"white\"] = codes[\"bold\"]\n", "\n", "\n", "def reset_color():\n", " return codes[\"reset\"]\n", "\n", "\n", "def colorize(color_key, text):\n", " return codes[color_key] + text + codes[\"reset\"]\n", "\n", "\n", "def ansiformat(attr, text):\n", " \"\"\"\n", " Format ``text`` with a color and/or some attributes::\n", "\n", " color normal color\n", " *color* bold color\n", " _color_ underlined color\n", " +color+ blinking color\n", " \"\"\"\n", " result = []\n", " if attr[:1] == attr[-1:] == '+':\n", " result.append(codes['blink'])\n", " attr = attr[1:-1]\n", " if attr[:1] == attr[-1:] == '*':\n", " result.append(codes['bold'])\n", " attr = attr[1:-1]\n", " if attr[:1] == attr[-1:] == '_':\n", " result.append(codes['underline'])\n", " attr = attr[1:-1]\n", " result.append(codes[attr])\n", " result.append(text)\n", " result.append(codes['reset'])\n", " return ''.join(result)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0.02564102564102564, 0, 0.030303030303030304, 0.030303030303030304, 0.030303030303030304, 0, 0.030303030303030304, 0.030303030303030304, 0, 0.014285714285714285, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0.02564102564102564, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
74
0.004202
############################################################################# # # Copyright (C) 2013 Navi-X # # This file is part of Navi-X. # # Navi-X is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Navi-X is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Navi-X. If not, see <http://www.gnu.org/licenses/>. # ############################################################################# ############################################################################# # # skin.py: # This file loads the GUI elements on the main screen. ############################################################################# from string import * import sys, os.path import urllib import urllib2 import re, random, string import xbmc, xbmcgui import re, os, time, datetime, traceback import shutil import zipfile import socket from settings import * from libs2 import * try: Emulating = xbmcgui.Emulating except: Emulating = False IMAGE_BG = 102 IMAGE_BG1 = 103 IMAGE_LOGO = 104 IMAGE_THUMB = 105 IMAGE_RATING = 126 LABEL_URLLBL = 106 LABEL_VERSION = 107 LABEL_INFOTEXT = 108 LABEL_DLINFOTEXT = 109 LABEL_LOADING = 110 LABEL_LISTPOS = 111 LIST_LIST1 = 112 IMAGE_DLLOGO = 118 LABEL_DT = 119 LIST_LIST2 = 120 TEXT_BOX_LIST2 = 121 LIST_LIST3 = 122 TEXT_BOX_LIST3 = 123 BUTTON_LEFT = 125 LIST_LIST4 = 127 BUTTON_RATE = 128 LIST_LIST5 = 129 BUTTON_RIGHT = 130 BUTTON_EXIT2 = 2125 Label_Protocol = 2108 ###################################################################### # Description: creates internal variables for the widget controls # Parameters : window: handle to the main window # Return : - ###################################################################### def load_skin(window): #images window.bg = window.getControl(IMAGE_BG) window.bg1 = window.getControl(IMAGE_BG1) #window.logo = window.getControl(IMAGE_LOGO) window.user_thumb = window.getControl(IMAGE_THUMB) window.download_logo = window.getControl(IMAGE_DLLOGO) window.rating = window.getControl(IMAGE_RATING) window.download_logo.setVisible(0) #labels window.urllbl = window.getControl(LABEL_URLLBL) window.dt = window.getControl(LABEL_DT) window.version = window.getControl(LABEL_VERSION) window.version.setLabel('version: '+ Version + '.' 
+ SubVersion)#, "font10") window.infotekst = window.getControl(LABEL_INFOTEXT) window.infotekst.setVisible(False) window.dlinfotekst = window.getControl(LABEL_DLINFOTEXT) window.dlinfotekst.setVisible(False) window.loading = window.getControl(LABEL_LOADING) window.loading.setVisible(False) window.listpos = window.getControl(LABEL_LISTPOS) try: window.labProtocol = window.getControl(Label_Protocol) except: pass #lists window.list1 = window.getControl(LIST_LIST1) window.list2 = window.getControl(LIST_LIST2) window.list2.setVisible(False) window.list3 = window.getControl(LIST_LIST3) window.list4 = window.getControl(LIST_LIST4) window.list5 = window.getControl(LIST_LIST5) window.list5.setVisible(False) item = xbmcgui.ListItem("Home") window.list3.addItem(item) item = xbmcgui.ListItem("Favorites") window.list3.addItem(item) item = xbmcgui.ListItem("Downloads") window.list3.addItem(item) if platform != 'xbox': item = xbmcgui.ListItem("View: " + window.listview) window.list3.addItem(item) item = xbmcgui.ListItem("Browse") window.list3.addItem(item) item = xbmcgui.ListItem("Sign in") window.list3.addItem(item) item = xbmcgui.ListItem("Exit") window.list3.addItem(item) item = xbmcgui.ListItem("Play") window.list4.addItem(item) item = xbmcgui.ListItem("Add to Favorites") window.list4.addItem(item) item = xbmcgui.ListItem("Download") window.list4.addItem(item) item = xbmcgui.ListItem("Rate It") window.list4.addItem(item) item = xbmcgui.ListItem("Reload Playlist") window.list4.addItem(item) item = xbmcgui.ListItem("More Options...") window.list4.addItem(item) #textbox window.list2tb = window.getControl(TEXT_BOX_LIST2) #window.list2tb.setVisible(False) #textbox window.list3tb = window.getControl(TEXT_BOX_LIST3) window.list3tb.setVisible(False) #exit button #window.exitbutton2 = window.getControl(BUTTON_EXIT2) #set the large list as default window.list = window.list1 #end of function
[ "#############################################################################\n", "#\n", "# Copyright (C) 2013 Navi-X\n", "#\n", "# This file is part of Navi-X.\n", "#\n", "# Navi-X is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU General Public License as published by\n", "# the Free Software Foundation, either version 2 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# Navi-X is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU General Public License\n", "# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.\n", "#\n", "#############################################################################\n", "\n", "#############################################################################\n", "#\n", "# skin.py:\n", "# This file loads the GUI elements on the main screen.\n", "#############################################################################\n", "\n", "from string import *\n", "import sys, os.path\n", "import urllib\n", "import urllib2\n", "import re, random, string\n", "import xbmc, xbmcgui\n", "import re, os, time, datetime, traceback\n", "import shutil\n", "import zipfile\n", "import socket\n", "from settings import *\n", "from libs2 import *\n", "\n", "try: Emulating = xbmcgui.Emulating\n", "except: Emulating = False\n", "\n", "IMAGE_BG = 102\n", "IMAGE_BG1 = 103\n", "IMAGE_LOGO = 104\n", "IMAGE_THUMB = 105\n", "IMAGE_RATING = 126\n", "LABEL_URLLBL = 106\n", "LABEL_VERSION = 107\n", "LABEL_INFOTEXT = 108\n", "LABEL_DLINFOTEXT = 109\n", "LABEL_LOADING = 110\n", "LABEL_LISTPOS = 111\n", "LIST_LIST1 = 112 \n", "IMAGE_DLLOGO = 118\n", "LABEL_DT = 119\n", "LIST_LIST2 = 120\n", "TEXT_BOX_LIST2 = 121 \n", "LIST_LIST3 = 122\n", "TEXT_BOX_LIST3 = 123\n", "BUTTON_LEFT = 125\n", "LIST_LIST4 = 127\n", "BUTTON_RATE = 128\n", "LIST_LIST5 = 129\n", "BUTTON_RIGHT = 130\n", "BUTTON_EXIT2 = 2125\n", "Label_Protocol = 2108\n", "######################################################################\n", "# Description: creates internal variables for the widget controls\n", "# Parameters : window: handle to the main window\n", "# Return : -\n", "######################################################################\n", "def load_skin(window): \n", " #images\n", " window.bg = window.getControl(IMAGE_BG)\n", " window.bg1 = window.getControl(IMAGE_BG1)\n", " #window.logo = window.getControl(IMAGE_LOGO)\n", " window.user_thumb = window.getControl(IMAGE_THUMB)\n", " window.download_logo = window.getControl(IMAGE_DLLOGO)\n", " window.rating = window.getControl(IMAGE_RATING) \n", " window.download_logo.setVisible(0)\n", " \n", " #labels\n", " window.urllbl = window.getControl(LABEL_URLLBL)\n", " window.dt = window.getControl(LABEL_DT)\n", " \n", " window.version = window.getControl(LABEL_VERSION)\n", " window.version.setLabel('version: '+ Version + '.' 
+ SubVersion)#, \"font10\")\n", " \n", " window.infotekst = window.getControl(LABEL_INFOTEXT)\n", " window.infotekst.setVisible(False)\n", " window.dlinfotekst = window.getControl(LABEL_DLINFOTEXT)\n", " window.dlinfotekst.setVisible(False)\n", " window.loading = window.getControl(LABEL_LOADING)\n", " window.loading.setVisible(False)\n", " window.listpos = window.getControl(LABEL_LISTPOS)\n", " \n", " try: window.labProtocol = window.getControl(Label_Protocol)\n", " except: pass\n", " \n", " #lists\n", " window.list1 = window.getControl(LIST_LIST1)\n", " window.list2 = window.getControl(LIST_LIST2)\n", " window.list2.setVisible(False)\n", " window.list3 = window.getControl(LIST_LIST3)\n", " window.list4 = window.getControl(LIST_LIST4) \n", " window.list5 = window.getControl(LIST_LIST5) \n", " window.list5.setVisible(False)\n", " \n", " item = xbmcgui.ListItem(\"Home\") \n", " window.list3.addItem(item)\n", " item = xbmcgui.ListItem(\"Favorites\") \n", " window.list3.addItem(item)\n", " item = xbmcgui.ListItem(\"Downloads\") \n", " window.list3.addItem(item) \n", " if platform != 'xbox':\n", " item = xbmcgui.ListItem(\"View: \" + window.listview) \n", " window.list3.addItem(item) \n", " item = xbmcgui.ListItem(\"Browse\") \n", " window.list3.addItem(item)\n", " item = xbmcgui.ListItem(\"Sign in\") \n", " window.list3.addItem(item) \n", " item = xbmcgui.ListItem(\"Exit\") \n", " window.list3.addItem(item)\n", " \n", " item = xbmcgui.ListItem(\"Play\") \n", " window.list4.addItem(item)\n", " item = xbmcgui.ListItem(\"Add to Favorites\") \n", " window.list4.addItem(item) \n", " item = xbmcgui.ListItem(\"Download\") \n", " window.list4.addItem(item) \n", " item = xbmcgui.ListItem(\"Rate It\") \n", " window.list4.addItem(item) \n", " item = xbmcgui.ListItem(\"Reload Playlist\") \n", " window.list4.addItem(item) \n", " item = xbmcgui.ListItem(\"More Options...\") \n", " window.list4.addItem(item) \n", " \n", " #textbox\n", " window.list2tb = window.getControl(TEXT_BOX_LIST2)\n", " #window.list2tb.setVisible(False)\n", " \n", " #textbox\n", " window.list3tb = window.getControl(TEXT_BOX_LIST3)\n", " window.list3tb.setVisible(False) \n", " \n", " #exit button\n", " #window.exitbutton2 = window.getControl(BUTTON_EXIT2)\n", " \n", " #set the large list as default\n", " window.list = window.list1\n", "\n", " #end of function\n", " \n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0.038461538461538464, 0.047619047619047616, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0.02857142857142857, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.08333333333333333, 0, 0, 0.02040816326530612, 0, 0, 0.017857142857142856, 0, 0.05555555555555555, 0.08333333333333333, 0, 0, 0.2, 0, 0.04938271604938271, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.015625, 0.11764705882352941, 0.2, 0.09090909090909091, 0, 0, 0, 0, 0.02, 0.0196078431372549, 0, 0.3333333333333333, 0.02564102564102564, 0, 0.022727272727272728, 0, 0.022727272727272728, 0.03125, 0, 0.015873015873015872, 0.02631578947368421, 0.024390243902439025, 0, 0.023809523809523808, 0.02857142857142857, 0.02702702702702703, 0, 0.2, 0.02631578947368421, 0, 0.02, 0.02857142857142857, 0.024390243902439025, 0.02702702702702703, 0.025, 0.029411764705882353, 0.020833333333333332, 0.02702702702702703, 0.020833333333333332, 0.029411764705882353, 0.2, 0.07692307692307693, 0, 0.02631578947368421, 0.5, 0.07692307692307693, 0, 0.02631578947368421, 0.2, 0.058823529411764705, 0.017241379310344827, 0.2, 0.02857142857142857, 0, 0, 0.047619047619047616, 0.2, 1 ]
156
0.035457
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import glob import unittest mydir = os.path.dirname(os.path.abspath(__file__)) tests_builddir = os.path.abspath(os.environ.get('TESTS_BUILDDIR', os.path.dirname(__file__))) builddir = os.path.dirname(tests_builddir) # we have to do this here instead of Makefile.am so that the implicitly added # directory for the source file comes after the builddir sys.path.insert(0, tests_builddir) sys.path.insert(0, builddir) os.environ['PYGTK_USE_GIL_STATE_API'] = '' sys.argv.append('--g-fatal-warnings') from gi.repository import GObject GObject.threads_init() SKIP_FILES = ['runtests', 'test_mainloop', # no os.fork on windows 'test_subprocess'] # blocks on testChildWatch if __name__ == '__main__': testdir = os.path.split(os.path.abspath(__file__))[0] os.chdir(testdir) def gettestnames(): files = glob.glob('*.py') names = map(lambda x: x[:-3], files) map(names.remove, SKIP_FILES) return names suite = unittest.TestSuite() loader = unittest.TestLoader() for name in gettestnames(): try: suite.addTest(loader.loadTestsFromName(name)) except Exception, e: print 'Could not load %s: %s' % (name, e) testRunner = unittest.TextTestRunner() testRunner.verbosity = 2 testRunner.run(suite)
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "\n", "\n", "import os\n", "import sys\n", "import glob\n", "import unittest\n", "\n", "mydir = os.path.dirname(os.path.abspath(__file__))\n", "tests_builddir = os.path.abspath(os.environ.get('TESTS_BUILDDIR', os.path.dirname(__file__)))\n", "builddir = os.path.dirname(tests_builddir)\n", "\n", "# we have to do this here instead of Makefile.am so that the implicitly added\n", "# directory for the source file comes after the builddir\n", "sys.path.insert(0, tests_builddir)\n", "sys.path.insert(0, builddir)\n", "\n", "os.environ['PYGTK_USE_GIL_STATE_API'] = ''\n", "sys.argv.append('--g-fatal-warnings')\n", "\n", "from gi.repository import GObject\n", "GObject.threads_init()\n", "\n", "\n", "SKIP_FILES = ['runtests',\n", " 'test_mainloop', # no os.fork on windows\n", " 'test_subprocess'] # blocks on testChildWatch\n", "\n", "\n", "if __name__ == '__main__':\n", " testdir = os.path.split(os.path.abspath(__file__))[0]\n", " os.chdir(testdir)\n", "\n", " def gettestnames():\n", " files = glob.glob('*.py')\n", " names = map(lambda x: x[:-3], files)\n", " map(names.remove, SKIP_FILES)\n", " return names\n", "\n", " suite = unittest.TestSuite()\n", " loader = unittest.TestLoader()\n", "\n", " for name in gettestnames():\n", " try:\n", " suite.addTest(loader.loadTestsFromName(name))\n", " except Exception, e:\n", " print 'Could not load %s: %s' % (name, e)\n", "\n", " testRunner = unittest.TextTestRunner()\n", " testRunner.verbosity = 2\n", " testRunner.run(suite)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
52
0.00077
# encoding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals import datetime from django.db import models class Checkin(models.Model): username = models.CharField(max_length=255) # We're going to do some non-GeoDjango action, since the setup is # complex enough. You could just as easily do: # # location = models.PointField() # # ...and your ``search_indexes.py`` could be less complex. latitude = models.FloatField() longitude = models.FloatField() comment = models.CharField(max_length=140, blank=True, default='', help_text='Say something pithy.') created = models.DateTimeField(default=datetime.datetime.now) class Meta: ordering = ['-created'] # Again, with GeoDjango, this would be unnecessary. def get_location(self): # Nothing special about this Point, but ensure that's we don't have to worry # about import paths. from haystack.utils.geo import Point pnt = Point(self.longitude, self.latitude) return pnt
[ "# encoding: utf-8\n", "\n", "from __future__ import absolute_import, division, print_function, unicode_literals\n", "\n", "import datetime\n", "\n", "from django.db import models\n", "\n", "\n", "class Checkin(models.Model):\n", " username = models.CharField(max_length=255)\n", " # We're going to do some non-GeoDjango action, since the setup is\n", " # complex enough. You could just as easily do:\n", " #\n", " # location = models.PointField()\n", " #\n", " # ...and your ``search_indexes.py`` could be less complex.\n", " latitude = models.FloatField()\n", " longitude = models.FloatField()\n", " comment = models.CharField(max_length=140, blank=True, default='', help_text='Say something pithy.')\n", " created = models.DateTimeField(default=datetime.datetime.now)\n", "\n", " class Meta:\n", " ordering = ['-created']\n", "\n", " # Again, with GeoDjango, this would be unnecessary.\n", " def get_location(self):\n", " # Nothing special about this Point, but ensure that's we don't have to worry\n", " # about import paths.\n", " from haystack.utils.geo import Point\n", " pnt = Point(self.longitude, self.latitude)\n", " return pnt\n" ]
[ 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0 ]
32
0.001042
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Since after Luigi 2.5.0, this is a private module to Luigi. Luigi users should not rely on that importing this module works. Furthermore, "luigi mr streaming" have been greatly superseeded by technoligies like Spark, Hive, etc. The hadoop runner. This module contains the main() method which will be used to run the mapper, combiner, or reducer on the Hadoop nodes. """ from __future__ import print_function try: import cPickle as pickle except ImportError: import pickle import logging import os import sys import tarfile import traceback class Runner(object): """ Run the mapper, combiner, or reducer on hadoop nodes. """ def __init__(self, job=None): self.extract_packages_archive() self.job = job or pickle.load(open("job-instance.pickle", "rb")) self.job._setup_remote() def run(self, kind, stdin=sys.stdin, stdout=sys.stdout): if kind == "map": self.job.run_mapper(stdin, stdout) elif kind == "combiner": self.job.run_combiner(stdin, stdout) elif kind == "reduce": self.job.run_reducer(stdin, stdout) else: raise Exception('weird command: %s' % kind) def extract_packages_archive(self): if not os.path.exists("packages.tar"): return tar = tarfile.open("packages.tar") for tarinfo in tar: tar.extract(tarinfo) tar.close() if '' not in sys.path: sys.path.insert(0, '') def print_exception(exc): tb = traceback.format_exc() print('luigi-exc-hex=%s' % tb.encode('hex'), file=sys.stderr) def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception): """ Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". Arguments: kind -- is either map, combiner, or reduce """ try: # Set up logging. logging.basicConfig(level=logging.WARN) kind = args is not None and args[1] or sys.argv[1] Runner().run(kind, stdin=stdin, stdout=stdout) except Exception as exc: # Dump encoded data that we will try to fetch using mechanize print_exception(exc) raise if __name__ == '__main__': main()
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "#\n", "# Copyright 2012-2015 Spotify AB\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "#\n", "\n", "\"\"\"\n", "Since after Luigi 2.5.0, this is a private module to Luigi. Luigi users should\n", "not rely on that importing this module works. Furthermore, \"luigi mr streaming\"\n", "have been greatly superseeded by technoligies like Spark, Hive, etc.\n", "\n", "The hadoop runner.\n", "\n", "This module contains the main() method which will be used to run the\n", "mapper, combiner, or reducer on the Hadoop nodes.\n", "\"\"\"\n", "\n", "from __future__ import print_function\n", "\n", "try:\n", " import cPickle as pickle\n", "except ImportError:\n", " import pickle\n", "import logging\n", "import os\n", "import sys\n", "import tarfile\n", "import traceback\n", "\n", "\n", "class Runner(object):\n", " \"\"\"\n", " Run the mapper, combiner, or reducer on hadoop nodes.\n", " \"\"\"\n", "\n", " def __init__(self, job=None):\n", " self.extract_packages_archive()\n", " self.job = job or pickle.load(open(\"job-instance.pickle\", \"rb\"))\n", " self.job._setup_remote()\n", "\n", " def run(self, kind, stdin=sys.stdin, stdout=sys.stdout):\n", " if kind == \"map\":\n", " self.job.run_mapper(stdin, stdout)\n", " elif kind == \"combiner\":\n", " self.job.run_combiner(stdin, stdout)\n", " elif kind == \"reduce\":\n", " self.job.run_reducer(stdin, stdout)\n", " else:\n", " raise Exception('weird command: %s' % kind)\n", "\n", " def extract_packages_archive(self):\n", " if not os.path.exists(\"packages.tar\"):\n", " return\n", "\n", " tar = tarfile.open(\"packages.tar\")\n", " for tarinfo in tar:\n", " tar.extract(tarinfo)\n", " tar.close()\n", " if '' not in sys.path:\n", " sys.path.insert(0, '')\n", "\n", "\n", "def print_exception(exc):\n", " tb = traceback.format_exc()\n", " print('luigi-exc-hex=%s' % tb.encode('hex'), file=sys.stderr)\n", "\n", "\n", "def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception):\n", " \"\"\"\n", " Run either the mapper, combiner, or reducer from the class instance in the file \"job-instance.pickle\".\n", "\n", " Arguments:\n", "\n", " kind -- is either map, combiner, or reduce\n", " \"\"\"\n", " try:\n", " # Set up logging.\n", " logging.basicConfig(level=logging.WARN)\n", "\n", " kind = args is not None and args[1] or sys.argv[1]\n", " Runner().run(kind, stdin=stdin, stdout=stdout)\n", " except Exception as exc:\n", " # Dump encoded data that we will try to fetch using mechanize\n", " print_exception(exc)\n", " raise\n", "\n", "\n", "if __name__ == '__main__':\n", " main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0.009345794392523364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
101
0.000325
# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ If a child cell hasn't sent capacity or capability updates in a while, downgrade its likelihood of being chosen for scheduling requests. """ from oslo.config import cfg from oslo.utils import timeutils from nova.cells import weights from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) mute_weigher_opts = [ cfg.FloatOpt('mute_weight_multiplier', default=-10.0, help='Multiplier used to weigh mute children. (The value ' 'should be negative.)'), cfg.FloatOpt('mute_weight_value', default=1000.0, help='Weight value assigned to mute children. (The value ' 'should be positive.)'), ] CONF = cfg.CONF CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells') CONF.register_opts(mute_weigher_opts, group='cells') class MuteChildWeigher(weights.BaseCellWeigher): """If a child cell hasn't been heard from, greatly lower its selection weight. """ def weight_multiplier(self): # negative multiplier => lower weight return CONF.cells.mute_weight_multiplier def _weigh_object(self, cell, weight_properties): """Check cell against the last_seen timestamp that indicates the time that the most recent capability or capacity update was received from the given cell. """ last_seen = cell.last_seen secs = CONF.cells.mute_child_interval if timeutils.is_older_than(last_seen, secs): # yep, that's a mute child; recommend highly that it be skipped! LOG.warn(_("%(cell)s has not been seen since %(last_seen)s and is " "being treated as mute."), {'cell': cell, 'last_seen': last_seen}) return CONF.cells.mute_weight_value else: return 0
[ "# Copyright (c) 2013 Rackspace Hosting\n", "# All Rights Reserved.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n", "# not use this file except in compliance with the License. You may obtain\n", "# a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n", "# License for the specific language governing permissions and limitations\n", "# under the License.\n", "\n", "\"\"\"\n", "If a child cell hasn't sent capacity or capability updates in a while,\n", "downgrade its likelihood of being chosen for scheduling requests.\n", "\"\"\"\n", "\n", "from oslo.config import cfg\n", "from oslo.utils import timeutils\n", "\n", "from nova.cells import weights\n", "from nova.i18n import _\n", "from nova.openstack.common import log as logging\n", "\n", "LOG = logging.getLogger(__name__)\n", "\n", "mute_weigher_opts = [\n", " cfg.FloatOpt('mute_weight_multiplier',\n", " default=-10.0,\n", " help='Multiplier used to weigh mute children. (The value '\n", " 'should be negative.)'),\n", " cfg.FloatOpt('mute_weight_value',\n", " default=1000.0,\n", " help='Weight value assigned to mute children. (The value '\n", " 'should be positive.)'),\n", "]\n", "\n", "CONF = cfg.CONF\n", "CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')\n", "CONF.register_opts(mute_weigher_opts, group='cells')\n", "\n", "\n", "class MuteChildWeigher(weights.BaseCellWeigher):\n", " \"\"\"If a child cell hasn't been heard from, greatly lower its selection\n", " weight.\n", " \"\"\"\n", "\n", " def weight_multiplier(self):\n", " # negative multiplier => lower weight\n", " return CONF.cells.mute_weight_multiplier\n", "\n", " def _weigh_object(self, cell, weight_properties):\n", " \"\"\"Check cell against the last_seen timestamp that indicates the time\n", " that the most recent capability or capacity update was received from\n", " the given cell.\n", " \"\"\"\n", "\n", " last_seen = cell.last_seen\n", " secs = CONF.cells.mute_child_interval\n", "\n", " if timeutils.is_older_than(last_seen, secs):\n", " # yep, that's a mute child; recommend highly that it be skipped!\n", " LOG.warn(_(\"%(cell)s has not been seen since %(last_seen)s and is \"\n", " \"being treated as mute.\"),\n", " {'cell': cell, 'last_seen': last_seen})\n", " return CONF.cells.mute_weight_value\n", " else:\n", " return 0\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0.013333333333333334, 0, 0, 0.03125, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
71
0.00127
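The weigher in the record above boils down to one timestamp check plus two config values. A minimal, self-contained sketch of that logic, assuming a simple cell object with a last_seen attribute; FakeCell, MUTE_CHILD_INTERVAL, and the other constants are illustrative stand-ins for the nova/oslo configuration, not part of the original module (the real framework applies the multiplier separately, combined here for brevity):

import datetime

# Illustrative values standing in for CONF.cells.* options (assumed, not from nova defaults).
MUTE_CHILD_INTERVAL = 300        # seconds without an update before a cell counts as "mute"
MUTE_WEIGHT_VALUE = 1000.0       # raw weight assigned to mute cells
MUTE_WEIGHT_MULTIPLIER = -10.0   # negative multiplier pushes mute cells to the bottom

class FakeCell:
    """Stand-in for a nova cell state object; only last_seen matters here."""
    def __init__(self, name, last_seen):
        self.name = name
        self.last_seen = last_seen

def weigh_cell(cell, now=None):
    """Mimic _weigh_object() and fold in the multiplier the framework would apply."""
    now = now or datetime.datetime.utcnow()
    is_mute = (now - cell.last_seen).total_seconds() > MUTE_CHILD_INTERVAL
    raw = MUTE_WEIGHT_VALUE if is_mute else 0.0
    return raw * MUTE_WEIGHT_MULTIPLIER

if __name__ == '__main__':
    fresh = FakeCell('cell-a', datetime.datetime.utcnow())
    stale = FakeCell('cell-b', datetime.datetime.utcnow() - datetime.timedelta(hours=1))
    # The stale cell ends up with a large negative weight, so the scheduler avoids it.
    print(weigh_cell(fresh), weigh_cell(stale))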
from __future__ import division import numpy as np import climlab import pytest #from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab num_lev = 30 def test_rrtmg_lw_creation(): state = climlab.column_state(num_lev=num_lev, water_depth=5.) rad = climlab.radiation.RRTMG_LW(state=state) # are the transformations reversible? assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Ts)) == rad.Ts) assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Tatm)) == rad.Tatm) def test_rrtm_creation(): # initial state (temperatures) state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.) # Create a RRTM radiation model rad = climlab.radiation.RRTMG(state=state) rad.step_forward() assert type(rad.subprocess['LW']) is climlab.radiation.RRTMG_LW assert type(rad.subprocess['SW']) is climlab.radiation.RRTMG_SW assert hasattr(rad, 'OLR') assert hasattr(rad, 'OLRclr') assert hasattr(rad, 'ASR') assert hasattr(rad, 'ASRclr') def test_swap_component(): # initial state (temperatures) state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.) # Create a RRTM radiation model rad = climlab.radiation.RRTMG(state=state) rad.step_forward() # Swap out the longwave model for CAM3 rad.remove_subprocess('LW') rad.step_forward() rad.add_subprocess('LW', climlab.radiation.CAM3Radiation_LW(state=state)) rad.step_forward() assert hasattr(rad, 'OLR') def test_multidim(): state = climlab.column_state(num_lev=40, num_lat=3, water_depth=5.) rad = climlab.radiation.RRTMG_LW(state=state) # are the transformations reversible? assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Ts)) == rad.Ts) assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Tatm)) == rad.Tatm) # Can we integrate? rad.step_forward() assert rad.OLR.shape == rad.Ts.shape def test_radiative_forcing(): '''Run a single-column radiative-convective model with CAM3 radiation out to equilibrium. Clone the model, double CO2 and measure the instantaneous change in TOA flux. It should be positive net downward flux.''' # State variables (Air and surface temperature) state = climlab.column_state(num_lev=30, water_depth=1.) # Parent model process rcm = climlab.TimeDependentProcess(state=state) # Fixed relative humidity h2o = climlab.radiation.ManabeWaterVapor(state=state) # Couple water vapor to radiation rad = climlab.radiation.RRTMG(state=state, h2ovmr=h2o.q) # Convective adjustment conv = climlab.convection.ConvectiveAdjustment(state=state, adj_lapse_rate=6.5) # Couple everything together rcm.add_subprocess('Radiation', rad) rcm.add_subprocess('WaterVapor', h2o) rcm.add_subprocess('Convection', conv) rcm.integrate_years(5.) #assert np.abs(rcm.ASR - rcm.OLR) < 0.1 # close to energy balance # There is currently a problem with energy conservation in the RRTM module, need to look into this. rcm2 = climlab.process_like(rcm) rcm2.subprocess['Radiation'].co2vmr *= 2. rcm2.compute_diagnostics() #assert (rcm2.ASR - rcm2.OLR) > 1. # positive radiative forcing # For now this test just checks whether the API calls work without error. # We will reinstate the physical checks after fixing the energy bug.
[ "from __future__ import division\n", "import numpy as np\n", "import climlab\n", "import pytest\n", "#from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW\n", "from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab\n", "\n", "num_lev = 30\n", "\n", "def test_rrtmg_lw_creation():\n", " state = climlab.column_state(num_lev=num_lev, water_depth=5.)\n", " rad = climlab.radiation.RRTMG_LW(state=state)\n", " # are the transformations reversible?\n", " assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Ts)) == rad.Ts)\n", " assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Tatm)) == rad.Tatm)\n", "\n", "def test_rrtm_creation():\n", " # initial state (temperatures)\n", " state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)\n", " # Create a RRTM radiation model\n", " rad = climlab.radiation.RRTMG(state=state)\n", " rad.step_forward()\n", " assert type(rad.subprocess['LW']) is climlab.radiation.RRTMG_LW\n", " assert type(rad.subprocess['SW']) is climlab.radiation.RRTMG_SW\n", " assert hasattr(rad, 'OLR')\n", " assert hasattr(rad, 'OLRclr')\n", " assert hasattr(rad, 'ASR')\n", " assert hasattr(rad, 'ASRclr')\n", "\n", "def test_swap_component():\n", " # initial state (temperatures)\n", " state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)\n", " # Create a RRTM radiation model\n", " rad = climlab.radiation.RRTMG(state=state)\n", " rad.step_forward()\n", " # Swap out the longwave model for CAM3\n", " rad.remove_subprocess('LW')\n", " rad.step_forward()\n", " rad.add_subprocess('LW', climlab.radiation.CAM3Radiation_LW(state=state))\n", " rad.step_forward()\n", " assert hasattr(rad, 'OLR')\n", "\n", "def test_multidim():\n", " state = climlab.column_state(num_lev=40, num_lat=3, water_depth=5.)\n", " rad = climlab.radiation.RRTMG_LW(state=state)\n", " # are the transformations reversible?\n", " assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Ts)) == rad.Ts)\n", " assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(rad.Tatm)) == rad.Tatm)\n", " # Can we integrate?\n", " rad.step_forward()\n", " assert rad.OLR.shape == rad.Ts.shape\n", "\n", "def test_radiative_forcing():\n", " '''Run a single-column radiative-convective model with CAM3 radiation\n", " out to equilibrium. Clone the model, double CO2 and measure the instantaneous\n", " change in TOA flux. It should be positive net downward flux.'''\n", " # State variables (Air and surface temperature)\n", " state = climlab.column_state(num_lev=30, water_depth=1.)\n", " # Parent model process\n", " rcm = climlab.TimeDependentProcess(state=state)\n", " # Fixed relative humidity\n", " h2o = climlab.radiation.ManabeWaterVapor(state=state)\n", " # Couple water vapor to radiation\n", " rad = climlab.radiation.RRTMG(state=state, h2ovmr=h2o.q)\n", " # Convective adjustment\n", " conv = climlab.convection.ConvectiveAdjustment(state=state,\n", " adj_lapse_rate=6.5)\n", " # Couple everything together\n", " rcm.add_subprocess('Radiation', rad)\n", " rcm.add_subprocess('WaterVapor', h2o)\n", " rcm.add_subprocess('Convection', conv)\n", "\n", " rcm.integrate_years(5.)\n", " #assert np.abs(rcm.ASR - rcm.OLR) < 0.1 # close to energy balance\n", " # There is currently a problem with energy conservation in the RRTM module, need to look into this.\n", " rcm2 = climlab.process_like(rcm)\n", " rcm2.subprocess['Radiation'].co2vmr *= 2.\n", " rcm2.compute_diagnostics()\n", " #assert (rcm2.ASR - rcm2.OLR) > 1. 
# positive radiative forcing\n", " # For now this test just checks whether the API calls work without error.\n", " # We will reinstate the physical checks after fixing the energy bug.\n" ]
[ 0, 0, 0, 0, 0.013333333333333334, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014084507042253521, 0.009523809523809525, 0, 0, 0, 0.014492753623188406, 0, 0 ]
81
0.003129
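The radiative-forcing test in the record above follows a clone / perturb / compare pattern: integrate to equilibrium, copy the model, double CO2, and measure the instantaneous change in the TOA flux. A toy, climlab-free sketch of that pattern; ToyColumn and its logarithmic CO2 term are illustrative assumptions, not climlab code:

import copy
import numpy as np

class ToyColumn:
    """Toy stand-in for a single-column model state (illustrative only)."""
    def __init__(self, co2_ppm=280.0, asr=240.0):
        self.co2_ppm = co2_ppm
        self.asr = asr  # absorbed shortwave, W/m^2, held fixed here

    def olr(self):
        # Toy logarithmic CO2 dependence: roughly 5.35 * ln(C/C0) W/m^2 of forcing.
        return 240.0 - 5.35 * np.log(self.co2_ppm / 280.0)

def instantaneous_forcing(model):
    """Clone the model, double CO2, and return the change in TOA imbalance (ASR - OLR)."""
    doubled = copy.deepcopy(model)
    doubled.co2_ppm *= 2.0
    baseline = model.asr - model.olr()
    perturbed = doubled.asr - doubled.olr()
    return perturbed - baseline

if __name__ == '__main__':
    forcing = instantaneous_forcing(ToyColumn())
    assert forcing > 1.0  # positive net downward flux after doubling CO2
    print(f'2xCO2 forcing (toy): {forcing:.2f} W/m^2')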
# -*- coding: utf-8 -*- # Copyright 2012 Vincent Jacques # vincent@vincent-jacques.net # This file is part of PyGithub. http://vincent-jacques.net/PyGithub # PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>. import Framework class Markdown(Framework.TestCase): def setUp(self): Framework.TestCase.setUp(self) self.text = "MyTitle\n=======\n\nIssue #1" self.repo = self.g.get_user().get_repo("PyGithub") def testRenderMarkdown(self): self.assertEqual(self.g.render_markdown(self.text), '<h1><a name="mytitle" class="anchor" href="#mytitle"><span class="mini-icon mini-icon-link"></span></a>MyTitle</h1><p>Issue #1</p>') def testRenderGithubFlavoredMarkdown(self): self.assertEqual(self.g.render_markdown(self.text, self.repo), '<h1>MyTitle</h1><p>Issue <a href="https://github.com/jacquev6/PyGithub/issues/1" class="issue-link" title="Gitub -&gt; Github everywhere">#1</a></p>')
[ "# -*- coding: utf-8 -*-\n", "\n", "# Copyright 2012 Vincent Jacques\n", "# vincent@vincent-jacques.net\n", "\n", "# This file is part of PyGithub. http://vincent-jacques.net/PyGithub\n", "\n", "# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License\n", "# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\n", "\n", "# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.\n", "\n", "# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.\n", "\n", "import Framework\n", "\n", "\n", "class Markdown(Framework.TestCase):\n", " def setUp(self):\n", " Framework.TestCase.setUp(self)\n", " self.text = \"MyTitle\\n=======\\n\\nIssue #1\"\n", " self.repo = self.g.get_user().get_repo(\"PyGithub\")\n", "\n", " def testRenderMarkdown(self):\n", " self.assertEqual(self.g.render_markdown(self.text), '<h1><a name=\"mytitle\" class=\"anchor\" href=\"#mytitle\"><span class=\"mini-icon mini-icon-link\"></span></a>MyTitle</h1><p>Issue #1</p>')\n", "\n", " def testRenderGithubFlavoredMarkdown(self):\n", " self.assertEqual(self.g.render_markdown(self.text, self.repo), '<h1>MyTitle</h1><p>Issue <a href=\"https://github.com/jacquev6/PyGithub/issues/1\" class=\"issue-link\" title=\"Gitub -&gt; Github everywhere\">#1</a></p>')\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0.007874015748031496, 0.008333333333333333, 0, 0.008, 0.008620689655172414, 0, 0.0070921985815602835, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.005154639175257732, 0, 0, 0.004484304932735426 ]
29
0.001709
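The two tests above exercise GitHub's markdown rendering endpoint through PyGithub's render_markdown wrapper, with and without a repository context. A rough sketch of the same calls made directly against the REST API with requests; the token value and repository name are placeholder assumptions:

import requests

GITHUB_API = 'https://api.github.com'
TOKEN = 'ghp_xxx'  # placeholder; a real personal access token would go here

def render_markdown(text, context=None, token=TOKEN):
    """Render markdown text; with a repository context, issue refs like #1 become links."""
    payload = {'text': text, 'mode': 'markdown'}
    if context is not None:
        payload.update(mode='gfm', context=context)
    resp = requests.post(
        f'{GITHUB_API}/markdown',
        json=payload,
        headers={'Authorization': f'token {token}'},
    )
    resp.raise_for_status()
    return resp.text  # rendered HTML

if __name__ == '__main__':
    text = 'MyTitle\n=======\n\nIssue #1'
    print(render_markdown(text))                               # plain markdown rendering
    print(render_markdown(text, context='jacquev6/PyGithub'))  # GFM with issue linking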
""" The *nussl* External File Zoo (EFZ) is a server that houses all files that are too large to bundle with *nussl* when distributing it through ``pip`` or Github. These types of files include audio examples, benchmark files for tests, and trained neural network models. *nussl* has built-in utilities for accessing the EFZ through its API. Here, it is possible to see what files are available on the EFZ and download desired files. The EFZ utilities allow for such functionality. """ import warnings import json import os import sys import hashlib from six.moves.urllib_parse import urljoin from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError from six.moves.urllib.request import urlopen, Request from six.moves.urllib.request import urlretrieve from . import constants def get_available_audio_files(): """ Returns a list of dicts containing metadata of the available audio files on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). Each entry in the list is in the following format: .. code-block:: python { u'file_length_seconds': 5.00390022675737, u'visible': True, u'file_name': u'K0140.wav', u'date_modified': u'2018-06-01', u'file_hash': u'f0d8d3c8d199d3790b0e42d1e5df50a6801f928d10f533149ed0babe61b5d7b5', u'file_size_bytes': 441388, u'file_description': u'Acoustic piano playing middle C.', u'audio_attributes': u'piano, middle C', u'file_size': u'431.0KiB', u'date_added': u'2018-06-01' } See Also: * :func:`print_available_audio_files`, prints a list of the audio files to the console. * :func:`download_audio_file` to download an audio file from the EFZ. Returns: (list): A list of dicts containing metadata of the available audio files on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). """ # _download_all_metadata() will throw its own errors, so no try block around it return _download_all_metadata(constants.NUSSL_EFZ_AUDIO_METADATA_URL) def print_available_audio_files(): """ Prints a message to the console that shows all of the available audio files that are on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). See Also: * :func:`get_available_audio_files` to get this same data from the EFZ server as a list. * :func:`download_audio_file` to download an audio file from the EFZ. Example: >>> import nussl >>> nussl.efz_utils.print_available_audio_files() File Name Duration (sec) Size Description dev1_female3_inst_mix.wav 10.0 1.7MiB Instantaneous mixture of three female speakers talking in a stereo field. dev1_female3_synthconv_130ms_5cm_mix.wav 10.0 1.7MiB Three female speakers talking in a stereo field, with 130ms of inter-channel delay. K0140.wav 5.0 431.0KiB Acoustic piano playing middle C. K0149.wav 5.0 430.0KiB Acoustic piano playing the A above middle C. 
(A440) To download one of these files insert the file name as the first parameter to :func:`download_audio_file`, like so: >>> nussl.efz_utils.download_audio_file('K0140.wav') """ file_metadata = get_available_audio_files() print(f'{"File Name":40} {"Duration (sec)":15} {"Size":10} {"Description":50}') for info in file_metadata: print( f'{info["file_name"]:40} {info["file_length_seconds"]:15} {info["file_size"]:10}' f' {info["file_description"]:50}' ) print('To download one of these files insert the file name ' 'as the first parameter to nussl.download_audio_file(), like so: \n' ' >>> nussl.efz_utils.download_audio_file(\'K0140.wav\')') def get_available_trained_models(): """ Returns a list of dicts containing metadata of the available trained models on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). Each entry in the list is in the following format: .. code-block:: python { u'for_class': u'DeepClustering', u'visible': True, u'file_name': u'deep_clustering_vocals_44k_long.model', u'date_modified': u'2018-06-01', u'file_hash': u'e09034c2cb43a293ece0b121f113b8e4e1c5a247331c71f40cb9ca38227ccc2c', u'file_size_bytes': 94543355, u'file_description': u'Deep clustering for vocal separation trained on augmented DSD100.', u'file_size': u'90.2MiB', u'date_added': u'2018-06-01' } Notes: Most of the entries in the dictionary are self-explanatory, but note the ``for_class`` entry. The ``for_class`` entry specifies which `nussl` separation class the given model will work with. Usually, `nussl` separation classes that require a model will default so retrieving a model on the EFZ server (if not already found on the user's machine), but sometimes it is desirable to use a model other than the default one provided. In this case, the ``for_class`` entry lets the user know which class it is valid for use with. Additionally, trying to load a model into a class that it is not explicitly labeled for that class will raise an exception. Just don't do it, ok? See Also: * :func:`print_available_trained_models`, prints a list of the trained models to the console. * :func:`download_trained_model` to download a trained model from the EFZ. Returns: (list): A list of dicts containing metadata of the available trained models on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). """ return _download_all_metadata(constants.NUSSL_EFZ_MODEL_METADATA_URL) def print_available_trained_models(): """ Prints a message to the console that shows all of the available trained models that are on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). Notes: Most of the entries in the dictionary are self-explanatory, but note the ``for_class`` entry. The ``for_class`` entry specifies which `nussl` separation class the given model will work with. Usually, `nussl` separation classes that require a model will default so retrieving a model on the EFZ server (if not already found on the user's machine), but sometimes it is desirable to use a model other than the default one provided. In this case, the ``for_class`` entry lets the user know which class it is valid for use with. Additionally, trying to load a model into a class that it is not explicitly labeled for that class will raise an exception. Just don't do it, ok? See Also: * :func:`get_available_trained_models` to get this same data from the EFZ server as a list. * :func:`download_trained_model` to download a trained model from the EFZ. 
Example: >>> import nussl >>> nussl.efz_utils.print_available_trained_models() File Name For Class Size Description deep_clustering_model.model DeepClustering 48.1MiB example Deep Clustering model deep_clustering_vocal_44k_long.model DeepClustering 90.2MiB trained DC model for vocal extraction To download one of these files insert the file name as the first parameter to download_trained_model(), like so: >>> nussl.efz_utils.download_trained_model('deep_clustering_model.h5') """ file_metadata = get_available_trained_models() print(f'{"File Name":40} {"For Class":15} {"Size":10} {"Description":50}') for info in file_metadata: print( f'{info["file_name"]:40} {info["for_class"]:15} {info["file_size"]:10}' f' {info["file_description"]:50}' ) print('To download one of these files insert the file name ' 'as the first parameter to nussl.download_trained_model, like so: \n' ' >>> nussl.efz_utils.download_trained_model(\'deep_clustering_model.h5\')') def get_available_benchmark_files(): """ Returns a list of dicts containing metadata of the available benchmark files for tests on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). Each entry in the list is in the following format: .. code-block:: python { u'for_class': u'DuetUnitTests', u'visible': True, u'file_name': u'benchmark_atn_bins.npy', u'date_modified': u'2018-06-19', u'file_hash': u'cf7fef6f4ea9af3dbde8b9880602eeaf72507b6c78f04097c5e79d34404a8a1f', u'file_size_bytes': 488, u'file_description': u'Attenuation bins numpy array for DUET benchmark test.', u'file_size': u'488.0B', u'date_added': u'2018-06-19' } Notes: Most of the entries in the dictionary are self-explanatory, but note the `for_class` entry. The `for_class` entry specifies which `nussl` benchmark class will load the corresponding benchmark file. Make sure these match exactly when writing tests! See Also: * :func:`print_available_benchmark_files`, prints a list of the benchmark files to the console. * :func:`download_benchmark_file` to download an benchmark file from the EFZ. Returns: (list): A list of dicts containing metadata of the available audio files on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). """ return _download_all_metadata(constants.NUSSL_EFZ_BENCHMARK_METADATA_URL) def print_available_benchmark_files(): """ Prints a message to the console that shows all of the available benchmark files that are on the nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/). Example: >>> import nussl >>> nussl.efz_utils.print_available_benchmark_files() File Name For Class Size Description mix3_matlab_repet_foreground.mat TestRepet 6.4MiB Foreground matrix for Repet class benchmark test. benchmark_atn_bins.npy DuetUnitTests 488.0B Attenuation bins numpy array for DUET benchmark test. benchmark_sym_atn.npy DuetUnitTests 3.4MiB Symmetric attenuation histogram for the DUET benchmark test. benchmark_wmat.npy DuetUnitTests 3.4MiB Frequency matrix for the DUET benchmark test. To download one of these files insert the file name as the first parameter to nussl.download_benchmark_file, like so: >>> nussl.efz_utils.download_benchmark_file('example.npy') Notes: Most of the entries in the printed list are self-explanatory, but note the ``for_class`` entry. The ``for_class`` entry specifies which `nussl` benchmark class will load the corresponding benchmark file. Make sure these match exactly when writing tests! See Also: * :func:`get_available_benchmark_files`, prints a list of the benchmark files to the console. 
* :func:`download_benchmark_file` to download an benchmark file from the EFZ. """ file_metadata = get_available_benchmark_files() print(f'{"File Name":40} {"For Class":15} {"Size":10} {"Description":50}') for info in file_metadata: print( f'{info["file_name"]:40} {info["for_class"]:15} {info["file_size"]:10}' f' {info["file_description"]:50}' ) print('To download one of these files insert the file name ' 'as the first parameter to nussl.download_benchmark_file, like so: \n' ' >>> nussl.efz_utils.download_benchmark_file(\'example.npy\')') def _download_all_metadata(url): """ Downloads the json file that contains all of the metadata for a specific file type (read: audio files, benchmark files, or trained models) that is on the EFZ server. This is retrieved from one of following three URLs (which are stored in nussl.constants): NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or NUSSL_EFZ_MODEL_METADATA_URL. Args: url (str): URL for the EFZ server that has metadata. One of these three: NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or NUSSL_EFZ_MODEL_METADATA_URL. Returns: (list): List of dicts with metadata for the desired file type. """ request = Request(url) # Make sure to get the newest data request.add_header('Pragma', 'no-cache') request.add_header('Cache-Control', 'max-age=0') try: return json.loads(urlopen(request).read()) except: raise NoConnectivityError("Can't connect to internet") def _download_metadata_for_file(file_name, file_type): """ Downloads the metadata entry for a specific file (:param:`file_name`) on the EFZ server. Args: file_name (str): File name as specified on the EFZ server. file_type (str): 'Type' of file, either 'audio', 'model', or 'benchmark'. Returns: (dict) Metadata entry for the specified file, or ``None`` if it could not be located. """ metadata_urls = { 'audio': constants.NUSSL_EFZ_AUDIO_METADATA_URL, 'benchmark': constants.NUSSL_EFZ_BENCHMARK_METADATA_URL, 'model': constants.NUSSL_EFZ_MODEL_METADATA_URL, } if file_type in metadata_urls: metadata_url = metadata_urls[file_type] else: # wrong file type, return raise MetadataError(f'Cannot find metadata of type {file_type}.') metadata = _download_all_metadata(metadata_url) for file_metadata in metadata: if file_metadata['file_name'] == file_name: return file_metadata raise MetadataError( f'No matching metadata for file {file_name}' f' at url {constants.NUSSL_EFZ_AUDIO_METADATA_URL}!' ) def download_audio_file(audio_file_name, local_folder=None, verbose=True): """ Downloads the specified audio file from the `nussl` External File Zoo (EFZ) server. The downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in `tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ server metadata, then the file will not be downloaded. Args: audio_file_name: (str) Name of the audio file to attempt to download. local_folder: (str) Path to local folder in which to download the file. If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in `/tmp/.nussl`. verbose (bool): If ``True`` prints the status of the download to the console. Returns: (String) Full path to the requested file (whether downloaded or not). 
Example: >>> import nussl >>> piano_path = nussl.efz_utils.download_audio_file('K0140.wav') >>> piano_signal = nussl.AudioSignal(piano_path) """ file_metadata = _download_metadata_for_file(audio_file_name, 'audio') file_hash = file_metadata['file_hash'] file_url = urljoin(constants.NUSSL_EFZ_AUDIO_URL, audio_file_name) result = _download_file(audio_file_name, file_url, local_folder, 'audio', file_hash=file_hash, verbose=verbose) return result def download_trained_model(model_name, local_folder=None, verbose=True): """ Downloads the specified trained model from the `nussl` External File Zoo (EFZ) server. The downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in `tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ server metadata, then the file will not be downloaded. Args: model_name: (str) Name of the trained model to attempt to download. local_folder: (str) Path to local folder in which to download the file. If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in `/tmp/.nussl`. verbose (bool): If ``True`` prints the status of the download to the console. Returns: (String) Full path to the requested file (whether downloaded or not). Example: >>> import nussl >>> model_path = nussl.efz_utils.download_trained_model('deep_clustering_model.h5') >>> signal = nussl.AudioSignal() >>> piano_signal = nussl.DeepClustering(signal, model_path=model_path) """ file_metadata = _download_metadata_for_file(model_name, 'model') file_hash = file_metadata['file_hash'] file_url = urljoin(constants.NUSSL_EFZ_MODELS_URL, model_name) result = _download_file(model_name, file_url, local_folder, 'models', file_hash=file_hash, verbose=verbose) return result def download_benchmark_file(benchmark_name, local_folder=None, verbose=True): """ Downloads the specified benchmark file from the `nussl` External File Zoo (EFZ) server. The downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in `/tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ server metadata, then the file will not be downloaded. Args: benchmark_name: (str) Name of the trained model to attempt to download. local_folder: (str) Path to local folder in which to download the file. If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in `tmp/.nussl`. verbose (bool): If ``True`` prints the status of the download to the console. Returns: (String) Full path to the requested file (whether downloaded or not). 
Example: >>> import nussl >>> import numpy as np >>> stm_atn_path = nussl.efz_utils.download_benchmark_file('benchmark_sym_atn.npy') >>> sym_atm = np.load(stm_atn_path) """ file_metadata = _download_metadata_for_file(benchmark_name, 'benchmark') file_hash = file_metadata['file_hash'] file_url = urljoin(constants.NUSSL_EFZ_BENCHMARKS_URL, benchmark_name) result = _download_file(benchmark_name, file_url, local_folder, 'benchmarks', file_hash=file_hash, verbose=verbose) return result def _download_file(file_name, url, local_folder, cache_subdir, file_hash=None, cache_dir=None, verbose=True): """ Downloads the specified file from the Heavily inspired by and lovingly adapted from keras' `get_file` function: https://github.com/fchollet/keras/blob/afbd5d34a3bdbb0916d558f96af197af1e92ce70/keras/utils/data_utils.py#L109 Args: file_name: (String) name of the file located on the server url: (String) url of the file local_folder: (String) alternate folder in which to download the file cache_subdir: (String) subdirectory of folder in which to download flie file_hash: (String) expected hash of downloaded file cache_dir: Returns: (String) local path to downloaded file """ if local_folder not in [None, '']: # local folder provided, let's create it if it doesn't exist and use it as datadir os.makedirs(os.path.expanduser(local_folder), exist_ok=True) datadir = os.path.expanduser(local_folder) else: if cache_dir is None: cache_dir = os.path.expanduser(os.path.join('~', '.nussl')) datadir_base = os.path.expanduser(cache_dir) datadir = os.path.join(datadir_base, cache_subdir) os.makedirs(datadir, exist_ok=True) file_path = os.path.join(datadir, file_name) download = False if os.path.exists(file_path): if file_hash is not None: # compare the provided hash with the hash of the file currently at file_path current_hash = _hash_file(file_path) # if the hashes are equal, we already have the file we need, so don't download if file_hash != current_hash: if verbose: warnings.warn( f'Hash for {file_path} does not match known hash. ' f' Downloading {file_name} from servers...' ) download = True elif verbose: print(f'Matching file found at {file_path}, skipping download.') else: download = True else: download = True if download: if verbose: print(f'Saving file at {file_path}\nDownloading {file_name} from {url}') def _dl_progress(count, block_size, total_size): percent = int(count * block_size * 100 / total_size) if percent <= 100: sys.stdout.write(f'\r{file_name}...{percent}%') sys.stdout.flush() try: try: reporthook = _dl_progress if verbose else None urlretrieve(url, file_path, reporthook) if verbose: print() # print a new line after the progress is done. except HTTPError as e: raise FailedDownloadError(f'URL fetch failure on {url}: {e.code} -- {e.msg}') except URLError as e: raise FailedDownloadError(f'URL fetch failure on {url}: {e.errno} -- {e.reason}') except (Exception, KeyboardInterrupt) as e: if os.path.exists(file_path): os.remove(file_path) raise e # check hash of received file to see if it matches the provided hash if file_hash is not None: download_hash = _hash_file(file_path) if file_hash != download_hash: # the downloaded file is not what it should be. Get rid of it. os.remove(file_path) raise MismatchedHashError( f'Deleted downloaded file ({file_path}) because of a hash mismatch.' 
) return file_path else: return file_path def _hash_directory(directory, ext=None): """ Calculates the hash of every child file in the given directory using python's built-in SHA256 function (using `os.walk()`, which also searches subdirectories recursively). If :param:`ext` is specified, this will only look at files with extension provided. This function is used to verify the integrity of data sets for use with nussl. Pretty much just makes sure that when we loop through/look at a directory, we understand the structure because the organization of the data set directories for different data sets are all unique and thus need to be hard coded by each generator function (below). If we get a hash mismatch we can throw an error easily. Args: directory (str): Directory within which file hashes get calculated. Searches recursively. ext (str): If provided, this function will only calculate the hash on files with the given extension. Returns: (str): String containing only hexadecimal digits of the has of the contents of the given directory. """ hash_list = [] for path, sub_dirs, files in os.walk(directory): if ext is None: hash_list.extend([_hash_file(os.path.join(path, f)) for f in files if os.path.isfile(os.path.join(path, f))]) else: hash_list.extend([_hash_file(os.path.join(path, f)) for f in files if os.path.isfile(os.path.join(path, f)) if os.path.splitext(f)[1] == ext]) hasher = hashlib.sha256() for hash_val in sorted(hash_list): # Sort this list so we're platform agnostic hasher.update(hash_val.encode('utf-8')) return hasher.hexdigest() def _hash_file(file_path, chunk_size=65535): """ Args: file_path: System path to the file to be hashed chunk_size: size of chunks Returns: file_hash: the SHA256 hashed string in hex """ hasher = hashlib.sha256() with open(file_path, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest() ######################################## # Error Classes ######################################## class NoConnectivityError(Exception): """ Exception class for lack of internet connection. """ pass class FailedDownloadError(Exception): """ Exception class for failed file downloads. """ pass class MismatchedHashError(Exception): """ Exception class for when a computed hash function does match a pre-computed hash. """ pass class MetadataError(Exception): """ Exception class for errors with metadata. """ pass
[ "\"\"\"\n", "The *nussl* External File Zoo (EFZ) is a server that houses all files that are too large to\n", "bundle with *nussl* when distributing it through ``pip`` or Github. These types of files include\n", "audio examples, benchmark files for tests, and trained neural network models.\n", "\n", "*nussl* has built-in utilities for accessing the EFZ through its API. Here, it is possible to\n", "see what files are available on the EFZ and download desired files. The EFZ utilities allow\n", "for such functionality.\n", "\"\"\"\n", "\n", "import warnings\n", "import json\n", "import os\n", "import sys\n", "import hashlib\n", "\n", "from six.moves.urllib_parse import urljoin\n", "from six.moves.urllib.error import HTTPError\n", "from six.moves.urllib.error import URLError\n", "from six.moves.urllib.request import urlopen, Request\n", "from six.moves.urllib.request import urlretrieve\n", "\n", "from . import constants\n", "\n", "\n", "def get_available_audio_files():\n", " \"\"\"\n", " Returns a list of dicts containing metadata of the available audio files on the nussl External\n", " File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " Each entry in the list is in the following format:\n", "\n", " .. code-block:: python\n", "\n", " {\n", " u'file_length_seconds': 5.00390022675737,\n", " u'visible': True,\n", " u'file_name': u'K0140.wav',\n", " u'date_modified': u'2018-06-01',\n", " u'file_hash': u'f0d8d3c8d199d3790b0e42d1e5df50a6801f928d10f533149ed0babe61b5d7b5',\n", " u'file_size_bytes': 441388,\n", " u'file_description': u'Acoustic piano playing middle C.',\n", " u'audio_attributes': u'piano, middle C',\n", " u'file_size': u'431.0KiB',\n", " u'date_added': u'2018-06-01'\n", " }\n", "\n", " See Also:\n", " * :func:`print_available_audio_files`, prints a list of the audio files to the console.\n", " * :func:`download_audio_file` to download an audio file from the EFZ.\n", "\n", " Returns:\n", " (list): A list of dicts containing metadata of the available audio files on the nussl\n", " External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " \"\"\"\n", " # _download_all_metadata() will throw its own errors, so no try block around it\n", " return _download_all_metadata(constants.NUSSL_EFZ_AUDIO_METADATA_URL)\n", "\n", "\n", "def print_available_audio_files():\n", " \"\"\"\n", " Prints a message to the console that shows all of the available audio files that are on the\n", " nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " See Also:\n", " * :func:`get_available_audio_files` to get this same data from the EFZ server as a list.\n", " * :func:`download_audio_file` to download an audio file from the EFZ.\n", "\n", " Example:\n", " >>> import nussl\n", " >>> nussl.efz_utils.print_available_audio_files()\n", " File Name Duration (sec) Size Description\n", " dev1_female3_inst_mix.wav 10.0 1.7MiB Instantaneous mixture of three female speakers talking in a stereo field.\n", " dev1_female3_synthconv_130ms_5cm_mix.wav 10.0 1.7MiB Three female speakers talking in a stereo field, with 130ms of inter-channel delay.\n", " K0140.wav 5.0 431.0KiB Acoustic piano playing middle C.\n", " K0149.wav 5.0 430.0KiB Acoustic piano playing the A above middle C. 
(A440)\n", "\n", " To download one of these files insert the file name as the first parameter to\n", " :func:`download_audio_file`, like so:\n", "\n", " >>> nussl.efz_utils.download_audio_file('K0140.wav')\n", "\n", " \"\"\"\n", " file_metadata = get_available_audio_files()\n", "\n", " print(f'{\"File Name\":40} {\"Duration (sec)\":15} {\"Size\":10} {\"Description\":50}')\n", " for info in file_metadata:\n", " print(\n", " f'{info[\"file_name\"]:40} {info[\"file_length_seconds\"]:15} {info[\"file_size\"]:10}'\n", " f' {info[\"file_description\"]:50}'\n", " )\n", " print('To download one of these files insert the file name '\n", " 'as the first parameter to nussl.download_audio_file(), like so: \\n'\n", " ' >>> nussl.efz_utils.download_audio_file(\\'K0140.wav\\')')\n", "\n", "\n", "def get_available_trained_models():\n", " \"\"\"\n", " Returns a list of dicts containing metadata of the available trained models on the nussl\n", " External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " Each entry in the list is in the following format:\n", "\n", " .. code-block:: python\n", "\n", " {\n", " u'for_class': u'DeepClustering',\n", " u'visible': True,\n", " u'file_name': u'deep_clustering_vocals_44k_long.model',\n", " u'date_modified': u'2018-06-01',\n", " u'file_hash': u'e09034c2cb43a293ece0b121f113b8e4e1c5a247331c71f40cb9ca38227ccc2c',\n", " u'file_size_bytes': 94543355,\n", " u'file_description': u'Deep clustering for vocal separation trained on augmented DSD100.',\n", " u'file_size': u'90.2MiB',\n", " u'date_added': u'2018-06-01'\n", " }\n", "\n", " Notes:\n", " Most of the entries in the dictionary are self-explanatory, but note the ``for_class``\n", " entry. The ``for_class`` entry specifies which `nussl` separation class the given model will\n", " work with. Usually, `nussl` separation classes that require a model will default so\n", " retrieving a model on the EFZ server (if not already found on the user's machine), but\n", " sometimes it is desirable to use a model other than the default one provided. In this case,\n", " the ``for_class`` entry lets the user know which class it is valid for use with.\n", " Additionally, trying to load a model into a class that it is not explicitly labeled for that\n", " class will raise an exception. Just don't do it, ok?\n", "\n", " See Also:\n", " * :func:`print_available_trained_models`, prints a list of the trained models to\n", " the console.\n", " * :func:`download_trained_model` to download a trained model from the EFZ.\n", "\n", " Returns:\n", " (list): A list of dicts containing metadata of the available trained models on the nussl\n", " External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", " \"\"\"\n", " return _download_all_metadata(constants.NUSSL_EFZ_MODEL_METADATA_URL)\n", "\n", "\n", "def print_available_trained_models():\n", " \"\"\"\n", " Prints a message to the console that shows all of the available trained models that are on the\n", " nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " Notes:\n", " Most of the entries in the dictionary are self-explanatory, but note the ``for_class``\n", " entry. The ``for_class`` entry specifies which `nussl` separation class the given model will\n", " work with. Usually, `nussl` separation classes that require a model will default so\n", " retrieving a model on the EFZ server (if not already found on the user's machine), but\n", " sometimes it is desirable to use a model other than the default one provided. 
In this case,\n", " the ``for_class`` entry lets the user know which class it is valid for use with.\n", " Additionally, trying to load a model into a class that it is not explicitly labeled for that\n", " class will raise an exception. Just don't do it, ok?\n", "\n", " See Also:\n", " * :func:`get_available_trained_models` to get this same data from the EFZ server as a list.\n", " * :func:`download_trained_model` to download a trained model from the EFZ.\n", "\n", " Example:\n", " >>> import nussl\n", " >>> nussl.efz_utils.print_available_trained_models()\n", " File Name For Class Size Description\n", " deep_clustering_model.model DeepClustering 48.1MiB example Deep Clustering model\n", " deep_clustering_vocal_44k_long.model DeepClustering 90.2MiB trained DC model for vocal extraction\n", "\n", " To download one of these files insert the file name as the first parameter to download_trained_model(), like so:\n", "\n", " >>> nussl.efz_utils.download_trained_model('deep_clustering_model.h5')\n", "\n", " \"\"\"\n", " file_metadata = get_available_trained_models()\n", "\n", " print(f'{\"File Name\":40} {\"For Class\":15} {\"Size\":10} {\"Description\":50}')\n", " for info in file_metadata:\n", " print(\n", " f'{info[\"file_name\"]:40} {info[\"for_class\"]:15} {info[\"file_size\"]:10}'\n", " f' {info[\"file_description\"]:50}'\n", " )\n", " print('To download one of these files insert the file name '\n", " 'as the first parameter to nussl.download_trained_model, like so: \\n'\n", " ' >>> nussl.efz_utils.download_trained_model(\\'deep_clustering_model.h5\\')')\n", "\n", "\n", "def get_available_benchmark_files():\n", " \"\"\"\n", " Returns a list of dicts containing metadata of the available benchmark files for tests on the\n", " nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " Each entry in the list is in the following format:\n", "\n", " .. code-block:: python\n", "\n", " {\n", " u'for_class': u'DuetUnitTests',\n", " u'visible': True, u'file_name':\n", " u'benchmark_atn_bins.npy',\n", " u'date_modified': u'2018-06-19',\n", " u'file_hash': u'cf7fef6f4ea9af3dbde8b9880602eeaf72507b6c78f04097c5e79d34404a8a1f',\n", " u'file_size_bytes': 488,\n", " u'file_description': u'Attenuation bins numpy array for DUET benchmark test.',\n", " u'file_size': u'488.0B',\n", " u'date_added': u'2018-06-19'\n", " }\n", "\n", " Notes:\n", " Most of the entries in the dictionary are self-explanatory, but note the `for_class`\n", " entry. The `for_class` entry specifies which `nussl` benchmark class will load the\n", " corresponding benchmark file. 
Make sure these match exactly when writing tests!\n", "\n", " See Also:\n", " * :func:`print_available_benchmark_files`, prints a list of the benchmark files to the\n", " console.\n", " * :func:`download_benchmark_file` to download an benchmark file from the EFZ.\n", "\n", " Returns:\n", " (list): A list of dicts containing metadata of the available audio files on the nussl\n", " External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " \"\"\"\n", " return _download_all_metadata(constants.NUSSL_EFZ_BENCHMARK_METADATA_URL)\n", "\n", "\n", "def print_available_benchmark_files():\n", " \"\"\"\n", " Prints a message to the console that shows all of the available benchmark files that are on the\n", " nussl External File Zoo (EFZ) server (http://nussl.ci.northwestern.edu/).\n", "\n", " Example:\n", " >>> import nussl\n", " >>> nussl.efz_utils.print_available_benchmark_files()\n", " File Name For Class Size Description\n", " mix3_matlab_repet_foreground.mat TestRepet 6.4MiB Foreground matrix for Repet class benchmark test.\n", " benchmark_atn_bins.npy DuetUnitTests 488.0B Attenuation bins numpy array for DUET benchmark test.\n", " benchmark_sym_atn.npy DuetUnitTests 3.4MiB Symmetric attenuation histogram for the DUET benchmark test.\n", " benchmark_wmat.npy DuetUnitTests 3.4MiB Frequency matrix for the DUET benchmark test.\n", "\n", " To download one of these files insert the file name as the first parameter to nussl.download_benchmark_file, like so:\n", "\n", " >>> nussl.efz_utils.download_benchmark_file('example.npy')\n", "\n", " Notes:\n", " Most of the entries in the printed list are self-explanatory, but note the ``for_class``\n", " entry. The ``for_class`` entry specifies which `nussl` benchmark class will load the\n", " corresponding benchmark file. Make sure these match exactly when writing tests!\n", "\n", " See Also:\n", " * :func:`get_available_benchmark_files`, prints a list of the benchmark files to the\n", " console.\n", " * :func:`download_benchmark_file` to download an benchmark file from the EFZ.\n", "\n", " \"\"\"\n", " file_metadata = get_available_benchmark_files()\n", "\n", " print(f'{\"File Name\":40} {\"For Class\":15} {\"Size\":10} {\"Description\":50}')\n", " for info in file_metadata:\n", " print(\n", " f'{info[\"file_name\"]:40} {info[\"for_class\"]:15} {info[\"file_size\"]:10}'\n", " f' {info[\"file_description\"]:50}'\n", " )\n", " print('To download one of these files insert the file name '\n", " 'as the first parameter to nussl.download_benchmark_file, like so: \\n'\n", " ' >>> nussl.efz_utils.download_benchmark_file(\\'example.npy\\')')\n", "\n", "\n", "def _download_all_metadata(url):\n", " \"\"\"\n", " Downloads the json file that contains all of the metadata for a specific file type (read:\n", " audio files, benchmark files, or trained models) that is on the EFZ server. This is retrieved\n", " from one of following three URLs (which are stored in nussl.constants):\n", " NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or NUSSL_EFZ_MODEL_METADATA_URL.\n", "\n", " Args:\n", " url (str): URL for the EFZ server that has metadata. 
One of these three:\n", " NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or\n", " NUSSL_EFZ_MODEL_METADATA_URL.\n", "\n", " Returns:\n", " (list): List of dicts with metadata for the desired file type.\n", "\n", " \"\"\"\n", " request = Request(url)\n", "\n", " # Make sure to get the newest data\n", " request.add_header('Pragma', 'no-cache')\n", " request.add_header('Cache-Control', 'max-age=0')\n", " try:\n", " return json.loads(urlopen(request).read())\n", " except:\n", " raise NoConnectivityError(\"Can't connect to internet\")\n", "\n", "\n", "def _download_metadata_for_file(file_name, file_type):\n", " \"\"\"\n", " Downloads the metadata entry for a specific file (:param:`file_name`) on the EFZ server.\n", "\n", " Args:\n", " file_name (str): File name as specified on the EFZ server.\n", " file_type (str): 'Type' of file, either 'audio', 'model', or 'benchmark'.\n", "\n", " Returns:\n", " (dict) Metadata entry for the specified file, or ``None`` if it could not be located.\n", "\n", " \"\"\"\n", "\n", " metadata_urls = {\n", " 'audio': constants.NUSSL_EFZ_AUDIO_METADATA_URL,\n", " 'benchmark': constants.NUSSL_EFZ_BENCHMARK_METADATA_URL,\n", " 'model': constants.NUSSL_EFZ_MODEL_METADATA_URL,\n", " }\n", "\n", " if file_type in metadata_urls:\n", " metadata_url = metadata_urls[file_type]\n", " else:\n", " # wrong file type, return\n", " raise MetadataError(f'Cannot find metadata of type {file_type}.')\n", "\n", " metadata = _download_all_metadata(metadata_url)\n", "\n", " for file_metadata in metadata:\n", " if file_metadata['file_name'] == file_name:\n", " return file_metadata\n", "\n", " raise MetadataError(\n", " f'No matching metadata for file {file_name}'\n", " f' at url {constants.NUSSL_EFZ_AUDIO_METADATA_URL}!'\n", " )\n", "\n", "\n", "def download_audio_file(audio_file_name, local_folder=None, verbose=True):\n", " \"\"\"\n", " Downloads the specified audio file from the `nussl` External File Zoo (EFZ) server. The\n", " downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is\n", " not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in\n", " `tmp/.nussl`. 
If the requested file is already in :param:`local_folder` (or one of the two\n", " aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ\n", " server metadata, then the file will not be downloaded.\n", "\n", " Args:\n", " audio_file_name: (str) Name of the audio file to attempt to download.\n", " local_folder: (str) Path to local folder in which to download the file.\n", " If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in\n", " `/tmp/.nussl`.\n", " verbose (bool): If ``True`` prints the status of the download to the console.\n", "\n", " Returns:\n", " (String) Full path to the requested file (whether downloaded or not).\n", "\n", " Example:\n", " >>> import nussl\n", " >>> piano_path = nussl.efz_utils.download_audio_file('K0140.wav')\n", " >>> piano_signal = nussl.AudioSignal(piano_path)\n", "\n", " \"\"\"\n", " file_metadata = _download_metadata_for_file(audio_file_name, 'audio')\n", "\n", " file_hash = file_metadata['file_hash']\n", "\n", " file_url = urljoin(constants.NUSSL_EFZ_AUDIO_URL, audio_file_name)\n", " result = _download_file(audio_file_name, file_url, local_folder, 'audio',\n", " file_hash=file_hash, verbose=verbose)\n", "\n", " return result\n", "\n", "\n", "def download_trained_model(model_name, local_folder=None, verbose=True):\n", " \"\"\"\n", " Downloads the specified trained model from the `nussl` External File Zoo (EFZ) server. The\n", " downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is\n", " not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in\n", " `tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two\n", " aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ\n", " server metadata, then the file will not be downloaded.\n", "\n", " Args:\n", " model_name: (str) Name of the trained model to attempt to download.\n", " local_folder: (str) Path to local folder in which to download the file.\n", " If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in\n", " `/tmp/.nussl`.\n", " verbose (bool): If ``True`` prints the status of the download to the console.\n", "\n", " Returns:\n", " (String) Full path to the requested file (whether downloaded or not).\n", "\n", " Example:\n", " >>> import nussl\n", " >>> model_path = nussl.efz_utils.download_trained_model('deep_clustering_model.h5')\n", " >>> signal = nussl.AudioSignal()\n", " >>> piano_signal = nussl.DeepClustering(signal, model_path=model_path)\n", "\n", " \"\"\"\n", " file_metadata = _download_metadata_for_file(model_name, 'model')\n", "\n", " file_hash = file_metadata['file_hash']\n", "\n", " file_url = urljoin(constants.NUSSL_EFZ_MODELS_URL, model_name)\n", " result = _download_file(model_name, file_url, local_folder, 'models',\n", " file_hash=file_hash, verbose=verbose)\n", "\n", " return result\n", "\n", "\n", "def download_benchmark_file(benchmark_name, local_folder=None, verbose=True):\n", " \"\"\"\n", " Downloads the specified benchmark file from the `nussl` External File Zoo (EFZ) server. The\n", " downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is\n", " not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in\n", " `/tmp/.nussl`. 
If the requested file is already in :param:`local_folder` (or one of the two\n", " aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ\n", " server metadata, then the file will not be downloaded.\n", "\n", " Args:\n", " benchmark_name: (str) Name of the trained model to attempt to download.\n", " local_folder: (str) Path to local folder in which to download the file.\n", " If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in\n", " `tmp/.nussl`.\n", " verbose (bool): If ``True`` prints the status of the download to the console.\n", "\n", " Returns:\n", " (String) Full path to the requested file (whether downloaded or not).\n", "\n", " Example:\n", " >>> import nussl\n", " >>> import numpy as np\n", " >>> stm_atn_path = nussl.efz_utils.download_benchmark_file('benchmark_sym_atn.npy')\n", " >>> sym_atm = np.load(stm_atn_path)\n", "\n", " \"\"\"\n", " file_metadata = _download_metadata_for_file(benchmark_name, 'benchmark')\n", "\n", " file_hash = file_metadata['file_hash']\n", "\n", " file_url = urljoin(constants.NUSSL_EFZ_BENCHMARKS_URL, benchmark_name)\n", " result = _download_file(benchmark_name, file_url, local_folder, 'benchmarks',\n", " file_hash=file_hash, verbose=verbose)\n", "\n", " return result\n", "\n", "\n", "def _download_file(file_name, url, local_folder, cache_subdir,\n", " file_hash=None, cache_dir=None, verbose=True):\n", " \"\"\"\n", " Downloads the specified file from the\n", "\n", " Heavily inspired by and lovingly adapted from keras' `get_file` function:\n", " https://github.com/fchollet/keras/blob/afbd5d34a3bdbb0916d558f96af197af1e92ce70/keras/utils/data_utils.py#L109\n", "\n", " Args:\n", " file_name: (String) name of the file located on the server\n", " url: (String) url of the file\n", " local_folder: (String) alternate folder in which to download the file\n", " cache_subdir: (String) subdirectory of folder in which to download flie\n", " file_hash: (String) expected hash of downloaded file\n", " cache_dir:\n", "\n", " Returns:\n", " (String) local path to downloaded file\n", "\n", " \"\"\"\n", " if local_folder not in [None, '']:\n", " # local folder provided, let's create it if it doesn't exist and use it as datadir\n", " os.makedirs(os.path.expanduser(local_folder), exist_ok=True)\n", " datadir = os.path.expanduser(local_folder)\n", " else:\n", " if cache_dir is None:\n", " cache_dir = os.path.expanduser(os.path.join('~', '.nussl'))\n", " datadir_base = os.path.expanduser(cache_dir)\n", " datadir = os.path.join(datadir_base, cache_subdir)\n", " os.makedirs(datadir, exist_ok=True)\n", "\n", " file_path = os.path.join(datadir, file_name)\n", "\n", " download = False\n", " if os.path.exists(file_path):\n", " if file_hash is not None:\n", " # compare the provided hash with the hash of the file currently at file_path\n", " current_hash = _hash_file(file_path)\n", "\n", " # if the hashes are equal, we already have the file we need, so don't download\n", " if file_hash != current_hash:\n", " if verbose:\n", " warnings.warn(\n", " f'Hash for {file_path} does not match known hash. 
'\n", " f' Downloading {file_name} from servers...'\n", " )\n", " download = True\n", " elif verbose:\n", " print(f'Matching file found at {file_path}, skipping download.')\n", "\n", " else:\n", " download = True\n", "\n", " else:\n", " download = True\n", "\n", " if download:\n", " if verbose:\n", " print(f'Saving file at {file_path}\\nDownloading {file_name} from {url}')\n", "\n", " def _dl_progress(count, block_size, total_size):\n", " percent = int(count * block_size * 100 / total_size)\n", "\n", " if percent <= 100:\n", " sys.stdout.write(f'\\r{file_name}...{percent}%')\n", " sys.stdout.flush()\n", "\n", " try:\n", " try:\n", " reporthook = _dl_progress if verbose else None\n", " urlretrieve(url, file_path, reporthook)\n", " if verbose: print() # print a new line after the progress is done.\n", "\n", " except HTTPError as e:\n", " raise FailedDownloadError(f'URL fetch failure on {url}: {e.code} -- {e.msg}')\n", " except URLError as e:\n", " raise FailedDownloadError(f'URL fetch failure on {url}: {e.errno} -- {e.reason}')\n", " except (Exception, KeyboardInterrupt) as e:\n", " if os.path.exists(file_path):\n", " os.remove(file_path)\n", " raise e\n", "\n", " # check hash of received file to see if it matches the provided hash\n", " if file_hash is not None:\n", " download_hash = _hash_file(file_path)\n", " if file_hash != download_hash:\n", " # the downloaded file is not what it should be. Get rid of it.\n", " os.remove(file_path)\n", " raise MismatchedHashError(\n", " f'Deleted downloaded file ({file_path}) because of a hash mismatch.'\n", " )\n", "\n", " return file_path\n", "\n", " else:\n", " return file_path\n", "\n", "\n", "def _hash_directory(directory, ext=None):\n", " \"\"\"\n", " Calculates the hash of every child file in the given directory using python's built-in SHA256\n", " function (using `os.walk()`, which also searches subdirectories recursively). If :param:`ext`\n", " is specified, this will only look at files with extension provided.\n", "\n", " This function is used to verify the integrity of data sets for use with nussl. Pretty much\n", " just makes sure that when we loop through/look at a directory, we understand the structure\n", " because the organization of the data set directories for different data sets are all unique\n", " and thus need to be hard coded by each generator function (below). If we get a hash mismatch\n", " we can throw an error easily.\n", "\n", " Args:\n", " directory (str): Directory within which file hashes get calculated. 
Searches recursively.\n", " ext (str): If provided, this function will only calculate the hash on files with the given\n", " extension.\n", "\n", " Returns:\n", " (str): String containing only hexadecimal digits of the has of the\n", " contents of the given directory.\n", "\n", " \"\"\"\n", " hash_list = []\n", " for path, sub_dirs, files in os.walk(directory):\n", " if ext is None:\n", " hash_list.extend([_hash_file(os.path.join(path, f)) for f in files\n", " if os.path.isfile(os.path.join(path, f))])\n", " else:\n", " hash_list.extend([_hash_file(os.path.join(path, f)) for f in files\n", " if os.path.isfile(os.path.join(path, f))\n", " if os.path.splitext(f)[1] == ext])\n", "\n", " hasher = hashlib.sha256()\n", " for hash_val in sorted(hash_list): # Sort this list so we're platform agnostic\n", " hasher.update(hash_val.encode('utf-8'))\n", "\n", " return hasher.hexdigest()\n", "\n", "\n", "def _hash_file(file_path, chunk_size=65535):\n", " \"\"\"\n", "\n", " Args:\n", " file_path: System path to the file to be hashed\n", " chunk_size: size of chunks\n", "\n", " Returns:\n", " file_hash: the SHA256 hashed string in hex\n", "\n", " \"\"\"\n", " hasher = hashlib.sha256()\n", "\n", " with open(file_path, 'rb') as fpath_file:\n", " for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n", " hasher.update(chunk)\n", "\n", " return hasher.hexdigest()\n", "\n", "\n", "########################################\n", "# Error Classes\n", "########################################\n", "\n", "\n", "class NoConnectivityError(Exception):\n", " \"\"\"\n", " Exception class for lack of internet connection.\n", " \"\"\"\n", " pass\n", "\n", "\n", "class FailedDownloadError(Exception):\n", " \"\"\"\n", " Exception class for failed file downloads.\n", " \"\"\"\n", " pass\n", "\n", "\n", "class MismatchedHashError(Exception):\n", " \"\"\"\n", " Exception class for when a computed hash function does match a pre-computed hash.\n", " \"\"\"\n", " pass\n", "\n", "\n", "class MetadataError(Exception):\n", " \"\"\"\n", " Exception class for errors with metadata.\n", " \"\"\"\n", " pass\n" ]
[ 0, 0.010869565217391304, 0.010309278350515464, 0, 0, 0.010638297872340425, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0.011363636363636364, 0.006666666666666667, 0.00625, 0.009174311926605505, 0.0078125, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0.010526315789473684, 0.009900990099009901, 0.010869565217391304, 0.010526315789473684, 0.01, 0.011235955056179775, 0.009900990099009901, 0, 0, 0, 0.011235955056179775, 0, 0.012048192771084338, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0.010526315789473684, 0.009900990099009901, 0.010869565217391304, 0.010526315789473684, 0.01, 0.011235955056179775, 0.009900990099009901, 0, 0, 0, 0.01, 0.012048192771084338, 0, 0, 0, 0, 0.010752688172043012, 0.009009009009009009, 0.008403361344537815, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0.010752688172043012, 0.01098901098901099, 0.011363636363636364, 0, 0, 0.010526315789473684, 0, 0.011627906976744186, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0.010752688172043012, 0.007633587786259542, 0.007407407407407408, 0.007042253521126761, 0.007874015748031496, 0, 0.00819672131147541, 0, 0, 0, 0, 0.010309278350515464, 0.010752688172043012, 0.011363636363636364, 0, 0, 0.010752688172043012, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.010638297872340425, 0.01020408163265306, 0, 0.009900990099009901, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0.012195121951219513, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0.010526315789473684, 0.010526315789473684, 0.010526315789473684, 0.01020408163265306, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0.010526315789473684, 0.010526315789473684, 0.010526315789473684, 0.01020408163265306, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0.010526315789473684, 0.010526315789473684, 0.010416666666666666, 0.01020408163265306, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0.010638297872340425, 0, 
0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01020408163265306, 0.01020408163265306, 0, 0, 0.010526315789473684, 0.010526315789473684, 0.010416666666666666, 0.010309278350515464, 0, 0, 0, 0.01020408163265306, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
627
0.00218
#!/usr/bin/env python #code source #https://github.com/axon-research/c3d-keras/blob/master/test_model.py import matplotlib matplotlib.use('Agg') from keras.models import model_from_json import os import cv2 import time import numpy as np import matplotlib.pyplot as plt import c3d_keras_model as c3d_model import sys import keras.backend as K dim_ordering = K.image_dim_ordering() PROJ_DIR = os.path.dirname(os.path.abspath(__file__)) NUM_CLASSES = 2 CLIP_LENGTH = 16 TEST_VIDEO_LOAD_PATH = os.path.join(PROJ_DIR,'..','aggr_vids/') print "[Info] image_dim_order (from default ~/.keras/keras.json)={}".format( dim_ordering) backend = dim_ordering def diagnose(data, verbose=True, label='input', plots=False, backend='tf'): # Convolution3D? if data.ndim > 2: if backend == 'th': data = np.transpose(data, (1, 2, 3, 0)) #else: # data = np.transpose(data, (0, 2, 1, 3)) min_num_spatial_axes = 10 max_outputs_to_show = 3 ndim = data.ndim print "[Info] {}.ndim={}".format(label, ndim) print "[Info] {}.shape={}".format(label, data.shape) for d in range(ndim): num_this_dim = data.shape[d] if num_this_dim >= min_num_spatial_axes: # check for spatial axes # just first, center, last indices range_this_dim = [0, num_this_dim/2, num_this_dim - 1] else: # sweep all indices for non-spatial axes range_this_dim = range(num_this_dim) for i in range_this_dim: new_dim = tuple([d] + range(d) + range(d + 1, ndim)) sliced = np.transpose(data, new_dim)[i, ...] print("[Info] {}, dim:{} {}-th slice: " "(min, max, mean, std)=({}, {}, {}, {})".format( label, d, i, np.min(sliced), np.max(sliced), np.mean(sliced), np.std(sliced))) if plots: # assume (l, h, w, c)-shaped input if data.ndim != 4: print("[Error] data (shape={}) is not 4-dim. Check data".format( data.shape)) return l, h, w, c = data.shape if l >= min_num_spatial_axes or \ h < min_num_spatial_axes or \ w < min_num_spatial_axes: print("[Error] data (shape={}) does not look like in (l,h,w,c) " "format. Do reshape/transpose.".format(data.shape)) return nrows = int(np.ceil(np.sqrt(data.shape[0]))) # BGR if c == 3: for i in range(l): mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.subplot(nrows, nrows, i + 1) # doh, one-based! im = np.squeeze(data[i, ...]).astype(np.float32) im = im[:, :, ::-1] # BGR to RGB # force it to range [0,1] im_min, im_max = im.min(), im.max() if im_max > im_min: im_std = (im - im_min) / (im_max - im_min) else: print "[Warning] image is constant!" im_std = np.zeros_like(im) plt.imshow(im_std) plt.axis('off') plt.title("{}: t={}".format(label, i)) plt.show() #plt.waitforbuttonpress() else: for j in range(min(c, max_outputs_to_show)): for i in range(l): mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.subplot(nrows, nrows, i + 1) # doh, one-based! im = np.squeeze(data[i, ...]).astype(np.float32) im = im[:, :, j] # force it to range [0,1] im_min, im_max = im.min(), im.max() if im_max > im_min: im_std = (im - im_min) / (im_max - im_min) else: print "[Warning] image is constant!" 
im_std = np.zeros_like(im) plt.imshow(im_std) plt.axis('off') plt.title("{}: o={}, t={}".format(label, j, i)) plt.show() #plt.waitforbuttonpress() elif data.ndim == 1: print("[Info] {} (min, max, mean, std)=({}, {}, {}, {})".format( label, np.min(data), np.max(data), np.mean(data), np.std(data))) print("[Info] data[:10]={}".format(data[:10])) return def main(model_name): #'dM06AMFLsrc.mp4' TEST_VIDEO = sys.argv[1] show_images = False diagnose_plots = False #model_dir = './models' model_dir = os.path.join(PROJ_DIR,'log_models') global backend # override backend if provided as an input arg #if len(sys.argv) > 1: #if 'tf' in sys.argv[1].lower(): backend = 'tf' #else: #backend = 'th' print "[Info] Using backend={}".format(backend) if backend == 'tf': model_weight_filename = os.path.join(model_dir, model_name) model_json_filename = os.path.join(model_dir, 'sports1M_model_custom_2l.json') else: print 'please change to backend=tf' print("[Info] Reading model architecture...") model = model_from_json(open(model_json_filename, 'r').read()) #model = c3d_model.get_model(backend=backend) # visualize model model_img_filename = os.path.join(model_dir, 'c3d_model.png') if not os.path.exists(model_img_filename): from keras.utils import plot_model plot_model(model, to_file=model_img_filename) print("[Info] Loading model weights...") model.load_weights(model_weight_filename) print("[Info] Loading model weights -- DONE!") model.compile(loss='mean_squared_error', optimizer='sgd') print("[Info] Loading labels...") with open('labels_aggr.txt', 'r') as f: labels = [line.strip() for line in f.readlines()] print('Total labels: {}'.format(len(labels))) print("[Info] Loading a sample video...") cap = cv2.VideoCapture(TEST_VIDEO) vid = [] while True: ret, img = cap.read() if not ret: break vid.append(cv2.resize(img, (171, 128))) vid = np.array(vid, dtype=np.float32) #plt.imshow(vid[2000]/256) #plt.show() # sample 16-frame clip #start_frame = 100 start_frame = 2 X = vid[start_frame:(start_frame + 16), :, :, :] #diagnose(X, verbose=True, label='X (16-frame clip)', plots=show_images) # subtract mean subtract_mean = False if subtract_mean: mean_cube = np.load('models/train01_16_128_171_mean.npy') mean_cube = np.transpose(mean_cube, (1, 2, 3, 0)) #diagnose(mean_cube, verbose=True, label='Mean cube', plots=show_images) X -= mean_cube #diagnose(X, verbose=True, label='Mean-subtracted X', plots=show_images) # center crop X = X[:, 8:120, 30:142, :] # (l, h, w, c) #diagnose(X, verbose=True, label='Center-cropped X', plots=show_images) if backend == 'th': X = np.transpose(X, (3, 0, 1, 2)) # input_shape = (3,16,112,112) else: pass # input_shape = (16,112,112,3) # get activations for intermediate layers if needed inspect_layers = [ # 'fc6', # 'fc7', ] for layer in inspect_layers: int_model = c3d_model.get_int_model(model=model, layer=layer, backend=backend) int_output = int_model.predict_on_batch(np.array([X])) int_output = int_output[0, ...] 
print "[Debug] at layer={}: output.shape={}".format(layer, int_output.shape) diagnose(int_output, verbose=True, label='{} activation'.format(layer), plots=diagnose_plots, backend=backend) # inference time_start = time.time() output = model.predict_on_batch(np.array([X])) time_end = time.time() print 'time elapsed for model.predict:', time_end-time_start,'s' # show results print('Saving class probabilitities in probabilities.png') plt.plot(output[0]) plt.title('Probability') plt.savefig("probabilities.png") print('Position of maximum probability: {}'.format(output[0].argmax())) print('Maximum probability: {:.5f}'.format(max(output[0]))) print('Corresponding label: {}'.format(labels[output[0].argmax()])) # sort top five predictions from softmax output top_inds = output[0].argsort()[::-1][:5] # reverse sort and take five largest items print('\nTop 5 probabilities and labels:') for i in top_inds: print('{1}: {0:.5f}'.format(output[0][i], labels[i])) if __name__ == '__main__': model_name = 'No1_k_01-0.62_0.27.hdf5' main(model_name)
[ "#!/usr/bin/env python\n", "#code source\n", "#https://github.com/axon-research/c3d-keras/blob/master/test_model.py\n", "import matplotlib\n", "matplotlib.use('Agg')\n", "from keras.models import model_from_json\n", "import os\n", "import cv2\n", "import time\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import c3d_keras_model as c3d_model\n", "import sys\n", "import keras.backend as K\n", "dim_ordering = K.image_dim_ordering()\n", "\n", "PROJ_DIR = os.path.dirname(os.path.abspath(__file__))\n", "NUM_CLASSES = 2\n", "CLIP_LENGTH = 16\n", "TEST_VIDEO_LOAD_PATH = os.path.join(PROJ_DIR,'..','aggr_vids/')\n", "\n", "print \"[Info] image_dim_order (from default ~/.keras/keras.json)={}\".format(\n", " dim_ordering)\n", "backend = dim_ordering\n", "\n", "def diagnose(data, verbose=True, label='input', plots=False, backend='tf'):\n", " # Convolution3D?\n", " if data.ndim > 2:\n", " if backend == 'th':\n", " data = np.transpose(data, (1, 2, 3, 0))\n", " #else:\n", " # data = np.transpose(data, (0, 2, 1, 3))\n", " min_num_spatial_axes = 10\n", " max_outputs_to_show = 3\n", " ndim = data.ndim\n", " print \"[Info] {}.ndim={}\".format(label, ndim)\n", " print \"[Info] {}.shape={}\".format(label, data.shape)\n", " for d in range(ndim):\n", " num_this_dim = data.shape[d]\n", " if num_this_dim >= min_num_spatial_axes: # check for spatial axes\n", " # just first, center, last indices\n", " range_this_dim = [0, num_this_dim/2, num_this_dim - 1]\n", " else:\n", " # sweep all indices for non-spatial axes\n", " range_this_dim = range(num_this_dim)\n", " for i in range_this_dim:\n", " new_dim = tuple([d] + range(d) + range(d + 1, ndim))\n", " sliced = np.transpose(data, new_dim)[i, ...]\n", " print(\"[Info] {}, dim:{} {}-th slice: \"\n", " \"(min, max, mean, std)=({}, {}, {}, {})\".format(\n", " label,\n", " d, i,\n", " np.min(sliced),\n", " np.max(sliced),\n", " np.mean(sliced),\n", " np.std(sliced)))\n", " if plots:\n", " # assume (l, h, w, c)-shaped input\n", " if data.ndim != 4:\n", " print(\"[Error] data (shape={}) is not 4-dim. Check data\".format(\n", " data.shape))\n", " return\n", " l, h, w, c = data.shape\n", " if l >= min_num_spatial_axes or \\\n", " h < min_num_spatial_axes or \\\n", " w < min_num_spatial_axes:\n", " print(\"[Error] data (shape={}) does not look like in (l,h,w,c) \"\n", " \"format. 
Do reshape/transpose.\".format(data.shape))\n", " return\n", " nrows = int(np.ceil(np.sqrt(data.shape[0])))\n", " # BGR\n", " if c == 3:\n", " for i in range(l):\n", " mng = plt.get_current_fig_manager()\n", " mng.resize(*mng.window.maxsize())\n", " plt.subplot(nrows, nrows, i + 1) # doh, one-based!\n", " im = np.squeeze(data[i, ...]).astype(np.float32)\n", " im = im[:, :, ::-1] # BGR to RGB\n", " # force it to range [0,1]\n", " im_min, im_max = im.min(), im.max()\n", " if im_max > im_min:\n", " im_std = (im - im_min) / (im_max - im_min)\n", " else:\n", " print \"[Warning] image is constant!\"\n", " im_std = np.zeros_like(im)\n", " plt.imshow(im_std)\n", " plt.axis('off')\n", " plt.title(\"{}: t={}\".format(label, i))\n", " plt.show()\n", " #plt.waitforbuttonpress()\n", " else:\n", " for j in range(min(c, max_outputs_to_show)):\n", " for i in range(l):\n", " mng = plt.get_current_fig_manager()\n", " mng.resize(*mng.window.maxsize())\n", " plt.subplot(nrows, nrows, i + 1) # doh, one-based!\n", " im = np.squeeze(data[i, ...]).astype(np.float32)\n", " im = im[:, :, j]\n", " # force it to range [0,1]\n", " im_min, im_max = im.min(), im.max()\n", " if im_max > im_min:\n", " im_std = (im - im_min) / (im_max - im_min)\n", " else:\n", " print \"[Warning] image is constant!\"\n", " im_std = np.zeros_like(im)\n", " plt.imshow(im_std)\n", " plt.axis('off')\n", " plt.title(\"{}: o={}, t={}\".format(label, j, i))\n", " plt.show()\n", " #plt.waitforbuttonpress()\n", " elif data.ndim == 1:\n", " print(\"[Info] {} (min, max, mean, std)=({}, {}, {}, {})\".format(\n", " label,\n", " np.min(data),\n", " np.max(data),\n", " np.mean(data),\n", " np.std(data)))\n", " print(\"[Info] data[:10]={}\".format(data[:10]))\n", "\n", " return\n", "\n", "def main(model_name):\n", " #'dM06AMFLsrc.mp4'\n", " TEST_VIDEO = sys.argv[1]\n", " show_images = False\n", " diagnose_plots = False\n", " #model_dir = './models'\n", " model_dir = os.path.join(PROJ_DIR,'log_models')\n", " global backend\n", "\n", " # override backend if provided as an input arg\n", " #if len(sys.argv) > 1:\n", " #if 'tf' in sys.argv[1].lower():\n", " backend = 'tf'\n", " #else:\n", " #backend = 'th'\n", " print \"[Info] Using backend={}\".format(backend)\n", "\n", " if backend == 'tf':\n", " model_weight_filename = os.path.join(model_dir, model_name)\n", " model_json_filename = os.path.join(model_dir, 'sports1M_model_custom_2l.json')\n", " else:\n", " print 'please change to backend=tf'\n", "\n", " print(\"[Info] Reading model architecture...\")\n", " model = model_from_json(open(model_json_filename, 'r').read())\n", " #model = c3d_model.get_model(backend=backend)\n", "\n", " # visualize model\n", " model_img_filename = os.path.join(model_dir, 'c3d_model.png')\n", " if not os.path.exists(model_img_filename):\n", " from keras.utils import plot_model\n", " plot_model(model, to_file=model_img_filename)\n", "\n", " print(\"[Info] Loading model weights...\")\n", " model.load_weights(model_weight_filename)\n", " print(\"[Info] Loading model weights -- DONE!\")\n", " model.compile(loss='mean_squared_error', optimizer='sgd')\n", "\n", " print(\"[Info] Loading labels...\")\n", " with open('labels_aggr.txt', 'r') as f:\n", " labels = [line.strip() for line in f.readlines()]\n", " print('Total labels: {}'.format(len(labels)))\n", "\n", " print(\"[Info] Loading a sample video...\")\n", " cap = cv2.VideoCapture(TEST_VIDEO)\n", "\n", " vid = []\n", " while True:\n", " ret, img = cap.read()\n", " if not ret:\n", " break\n", " vid.append(cv2.resize(img, (171, 
128)))\n", " vid = np.array(vid, dtype=np.float32)\n", "\n", " #plt.imshow(vid[2000]/256)\n", " #plt.show()\n", "\n", " # sample 16-frame clip\n", " #start_frame = 100\n", " start_frame = 2\n", " X = vid[start_frame:(start_frame + 16), :, :, :]\n", " #diagnose(X, verbose=True, label='X (16-frame clip)', plots=show_images)\n", "\n", " # subtract mean\n", " subtract_mean = False\n", " if subtract_mean:\n", " mean_cube = np.load('models/train01_16_128_171_mean.npy')\n", " mean_cube = np.transpose(mean_cube, (1, 2, 3, 0))\n", " #diagnose(mean_cube, verbose=True, label='Mean cube', plots=show_images)\n", " X -= mean_cube\n", " #diagnose(X, verbose=True, label='Mean-subtracted X', plots=show_images)\n", "\n", " # center crop\n", " X = X[:, 8:120, 30:142, :] # (l, h, w, c)\n", " #diagnose(X, verbose=True, label='Center-cropped X', plots=show_images)\n", "\n", " if backend == 'th':\n", " X = np.transpose(X, (3, 0, 1, 2)) # input_shape = (3,16,112,112)\n", " else:\n", " pass # input_shape = (16,112,112,3)\n", "\n", " # get activations for intermediate layers if needed\n", " inspect_layers = [\n", " # 'fc6',\n", " # 'fc7',\n", " ]\n", " for layer in inspect_layers:\n", " int_model = c3d_model.get_int_model(model=model, layer=layer, backend=backend)\n", " int_output = int_model.predict_on_batch(np.array([X]))\n", " int_output = int_output[0, ...]\n", " print \"[Debug] at layer={}: output.shape={}\".format(layer, int_output.shape)\n", " diagnose(int_output,\n", " verbose=True,\n", " label='{} activation'.format(layer),\n", " plots=diagnose_plots,\n", " backend=backend)\n", "\n", " # inference\n", " time_start = time.time()\n", " output = model.predict_on_batch(np.array([X]))\n", " time_end = time.time()\n", " print 'time elapsed for model.predict:', time_end-time_start,'s'\n", " # show results\n", " print('Saving class probabilitities in probabilities.png')\n", " plt.plot(output[0])\n", " plt.title('Probability')\n", " plt.savefig(\"probabilities.png\")\n", " print('Position of maximum probability: {}'.format(output[0].argmax()))\n", " print('Maximum probability: {:.5f}'.format(max(output[0])))\n", " print('Corresponding label: {}'.format(labels[output[0].argmax()]))\n", "\n", " # sort top five predictions from softmax output\n", " top_inds = output[0].argsort()[::-1][:5] # reverse sort and take five largest items\n", " print('\\nTop 5 probabilities and labels:')\n", " for i in top_inds:\n", " print('{1}: {0:.5f}'.format(output[0][i], labels[i]))\n", "\n", "if __name__ == '__main__':\n", " model_name = 'No1_k_01-0.62_0.27.hdf5'\n", " main(model_name)\n" ]
[ 0, 0.07692307692307693, 0.014285714285714285, 0, 0, 0.024390243902439025, 0.1, 0.09090909090909091, 0.08333333333333333, 0.05263157894736842, 0.03125, 0.027777777777777776, 0.09090909090909091, 0.038461538461538464, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.013157894736842105, 0, 0, 0, 0, 0.06666666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0.01282051282051282, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0.021739130434782608, 0, 0.023809523809523808, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0.014084507042253521, 0, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0.043478260869565216, 0, 0, 0, 0.03571428571428571, 0.019230769230769232, 0, 0, 0, 0.037037037037037035, 0.02702702702702703, 0, 0.09090909090909091, 0.05, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0.0625, 0, 0, 0.043478260869565216, 0, 0, 0.012987012987012988, 0, 0, 0, 0, 0.015625, 0.017857142857142856, 0.012987012987012988, 0.047619047619047616, 0.012987012987012988, 0, 0, 0.021739130434782608, 0.013157894736842105, 0, 0, 0.0136986301369863, 0, 0, 0, 0, 0, 0.0625, 0.0625, 0, 0, 0.011494252873563218, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0.037037037037037035, 0, 0 ]
241
0.007532
from aiv.window import Window from aiv.opengl import * import numpy from aiv.math.matrix4 import Matrix4 from aiv.input import KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN window = Window() vertex_shader = """ #version 330 core layout(location = 0) in vec3 vertex; uniform mat4 world; uniform mat4 camera; out vec4 color; void main() { gl_Position = camera * world * vec4(vertex, 1); color = vec4(vertex, 1); } """ fragment_shader = """ #version 330 core in vec4 color; out vec4 final_color; void main() { final_color = color; } """ glClearColor(0, 0, 0, 1) # z fighting !!! glEnable(GL_DEPTH_TEST) vao = glGenVertexArrays(1) glBindVertexArray(vao) vbo_vertices = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, vbo_vertices) vertices = numpy.array([ # front -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, # back -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, # right 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, # left -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, # top -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, # bottom -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1 ], numpy.float32) glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW) glEnableVertexAttribArray(0) glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None) vshader = glCreateShader(GL_VERTEX_SHADER) fshader = glCreateShader(GL_FRAGMENT_SHADER) glShaderSource(vshader, vertex_shader) glShaderSource(fshader, fragment_shader) glCompileShader(vshader) glCompileShader(fshader) program = glCreateProgram() glAttachShader(program, vshader) glAttachShader(program, fshader) glLinkProgram(program) glDetachShader(program, vshader) glDetachShader(program, fshader) glDeleteShader(vshader) glDeleteShader(fshader) glUseProgram(program) # get uniform ids world = glGetUniformLocation(program, "world") camera = glGetUniformLocation(program, "camera") yrot = 0 xrot = 0 while window.is_opened: if window.get_key(KEY_RIGHT): yrot += 60 * window.delta_time if window.get_key(KEY_LEFT): yrot -= 60 * window.delta_time if window.get_key(KEY_UP): xrot += 60 * window.delta_time if window.get_key(KEY_DOWN): xrot -= 60 * window.delta_time glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) #glClear(GL_COLOR_BUFFER_BIT) mat = Matrix4.translate(0, 0, -5) * Matrix4.rotate_x(xrot) * Matrix4.rotate_y(yrot) * Matrix4.scale(1, 1, 1) cam = Matrix4.perspective(60, window.aspect_ratio, 0.1, 100) * Matrix4.look_at(0, 0, 5, 0, 0, 0, 0, 1, 0) glUniformMatrix4fv(world, 1, GL_FALSE, mat.m) glUniformMatrix4fv(camera, 1, GL_FALSE, cam.m) # the draw call glDrawArrays(GL_TRIANGLES, 0, 6 * 6) window.update()
[ "from aiv.window import Window\n", "from aiv.opengl import *\n", "import numpy\n", "from aiv.math.matrix4 import Matrix4\n", "from aiv.input import KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN\n", "\n", "window = Window()\n", "\n", "vertex_shader = \"\"\"\n", "#version 330 core\n", "\n", "layout(location = 0) in vec3 vertex;\n", "\n", "uniform mat4 world;\n", "uniform mat4 camera;\n", "\n", "out vec4 color;\n", "\n", "void main() {\n", "\tgl_Position = camera * world * vec4(vertex, 1);\n", "\tcolor = vec4(vertex, 1);\n", "}\n", "\"\"\"\n", "\n", "fragment_shader = \"\"\"\n", "#version 330 core\n", "\n", "in vec4 color;\n", "out vec4 final_color;\n", "\n", "void main() {\n", "\tfinal_color = color;\n", "}\n", "\"\"\"\n", "\n", "glClearColor(0, 0, 0, 1)\n", "# z fighting !!!\n", "glEnable(GL_DEPTH_TEST)\n", "\n", "vao = glGenVertexArrays(1)\n", "glBindVertexArray(vao)\n", "\n", "vbo_vertices = glGenBuffers(1)\n", "glBindBuffer(GL_ARRAY_BUFFER, vbo_vertices)\n", "vertices = numpy.array([\n", " # front\n", " -1, -1, 1,\n", " 1, -1, 1,\n", " -1, 1, 1,\n", "\n", " -1, 1, 1,\n", " 1, 1, 1,\n", " 1, -1, 1,\n", "\n", " # back\n", " -1, -1, -1,\n", " 1, -1, -1,\n", " -1, 1, -1,\n", "\n", " -1, 1, -1,\n", " 1, 1, -1,\n", " 1, -1, -1,\n", "\n", " # right\n", " 1, 1, 1,\n", " 1, -1, 1,\n", " 1, -1, -1,\n", "\n", " 1, 1, 1,\n", " 1, 1, -1,\n", " 1, -1, -1,\n", "\n", " # left\n", " -1, 1, 1,\n", " -1, -1, 1,\n", " -1, -1, -1,\n", "\n", " -1, 1, 1,\n", " -1, 1, -1,\n", " -1, -1, -1,\n", "\n", " # top\n", " -1, 1, 1,\n", " 1, 1, 1,\n", " -1, 1, -1,\n", "\n", " -1, 1, -1,\n", " 1, 1, -1,\n", " 1, 1, 1,\n", "\n", " # bottom\n", " -1, -1, 1,\n", " 1, -1, 1,\n", " -1, -1, -1,\n", "\n", " -1, -1, -1,\n", " 1, -1, -1,\n", " 1, -1, 1\n", " ], numpy.float32)\n", "glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW) \n", "glEnableVertexAttribArray(0)\n", "glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None)\n", "\n", "\n", "vshader = glCreateShader(GL_VERTEX_SHADER)\n", "fshader = glCreateShader(GL_FRAGMENT_SHADER)\n", "\n", "glShaderSource(vshader, vertex_shader)\n", "glShaderSource(fshader, fragment_shader)\n", "\n", "glCompileShader(vshader)\n", "glCompileShader(fshader)\n", "\n", "program = glCreateProgram()\n", "\n", "glAttachShader(program, vshader)\n", "glAttachShader(program, fshader)\n", "\n", "glLinkProgram(program)\n", " \n", "glDetachShader(program, vshader)\n", "glDetachShader(program, fshader)\n", "\n", "glDeleteShader(vshader)\n", "glDeleteShader(fshader)\n", "\n", "glUseProgram(program)\n", "\n", "# get uniform ids\n", "world = glGetUniformLocation(program, \"world\")\n", "camera = glGetUniformLocation(program, \"camera\")\n", "\n", "yrot = 0\n", "xrot = 0\n", "\n", "while window.is_opened:\n", "\n", " if window.get_key(KEY_RIGHT):\n", " yrot += 60 * window.delta_time\n", "\n", " if window.get_key(KEY_LEFT):\n", " yrot -= 60 * window.delta_time\n", "\n", " if window.get_key(KEY_UP):\n", " xrot += 60 * window.delta_time\n", "\n", " if window.get_key(KEY_DOWN):\n", " xrot -= 60 * window.delta_time\n", "\n", " glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n", " #glClear(GL_COLOR_BUFFER_BIT)\n", "\n", " mat = Matrix4.translate(0, 0, -5) * Matrix4.rotate_x(xrot) * Matrix4.rotate_y(yrot) * Matrix4.scale(1, 1, 1)\n", "\n", " cam = Matrix4.perspective(60, window.aspect_ratio, 0.1, 100) * Matrix4.look_at(0, 0, 5, 0, 0, 0, 0, 1, 0)\n", "\n", " glUniformMatrix4fv(world, 1, GL_FALSE, mat.m)\n", " glUniformMatrix4fv(camera, 1, GL_FALSE, cam.m)\n", "\n", " # the draw call\n", " 
glDrawArrays(GL_TRIANGLES, 0, 6 * 6)\n", "\n", " window.update()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013513513513513514, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0.008547008547008548, 0, 0.008771929824561403, 0, 0, 0, 0, 0, 0, 0, 0 ]
163
0.00214
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import re from glob import glob from setuptools import find_packages, setup from os.path import join, dirname execfile(join(dirname(__file__), 'openerp', 'release.py')) # Load release variables lib_name = 'openerp' def py2exe_datafiles(): data_files = {} data_files['Microsoft.VC90.CRT'] = glob('C:\Microsoft.VC90.CRT\*.*') for root, dirnames, filenames in os.walk('openerp'): for filename in filenames: if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename): data_files.setdefault(root, []).append(join(root, filename)) import babel data_files['babel/localedata'] = glob(join(dirname(babel.__file__), 'localedata', '*')) others = ['global.dat', 'numbers.py', 'support.py', 'plural.py'] data_files['babel'] = map(lambda f: join(dirname(babel.__file__), f), others) others = ['frontend.py', 'mofile.py'] data_files['babel/messages'] = map(lambda f: join(dirname(babel.__file__), 'messages', f), others) import pytz tzdir = dirname(pytz.__file__) for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')): base = join('pytz', root[len(tzdir) + 1:]) data_files[base] = [join(root, f) for f in filenames] import docutils dudir = dirname(docutils.__file__) for root, _, filenames in os.walk(dudir): base = join('docutils', root[len(dudir) + 1:]) data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))] import passlib pl = dirname(passlib.__file__) for root, _, filenames in os.walk(pl): base = join('passlib', root[len(pl) + 1:]) data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))] return data_files.items() def py2exe_options(): if os.name == 'nt': import py2exe return { 'console': [ {'script': 'odoo.py'}, {'script': 'openerp-gevent'}, {'script': 'openerp-server', 'icon_resources': [ (1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico')) ]}, ], 'options': { 'py2exe': { 'skip_archive': 1, 'optimize': 0, # Keep the assert running as the integrated tests rely on them. 
'dist_dir': 'dist', 'packages': [ 'asynchat', 'asyncore', 'commands', 'dateutil', 'decimal', 'decorator', 'docutils', 'email', 'encodings', 'HTMLParser', 'imaplib', 'jinja2', 'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify', 'mako', 'markupsafe', 'mock', 'openerp', 'openid', 'passlib', 'PIL', 'poplib', 'psutil', 'pychart', 'pydot', 'pyparsing', 'pyPdf', 'pytz', 'reportlab', 'requests', 'select', 'simplejson', 'smtplib', 'uuid', 'vatnumber', 'vobject', 'win32service', 'win32serviceutil', 'xlwt', 'xml', 'xml.dom', 'yaml', ], 'excludes': ['Tkconstants', 'Tkinter', 'tcl'], } }, 'data_files': py2exe_datafiles() } else: return {} setup( name='odoo', version=version, description=description, long_description=long_desc, url=url, author=author, author_email=author_email, classifiers=filter(None, classifiers.split('\n')), license=license, scripts=['openerp-server', 'odoo.py'], packages=find_packages(), package_dir={'%s' % lib_name: 'openerp'}, include_package_data=True, dependency_links=['http://download.gna.org/pychart/'], install_requires=[ 'babel >= 1.0', 'decorator', 'docutils', 'feedparser', 'gevent', 'Jinja2', 'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/ 'mako', 'mock', 'passlib', 'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/ 'psutil', # windows binary code.google.com/p/psutil/downloads/list 'psycogreen', 'psycopg2 >= 2.2', 'pychart', # not on pypi, use: pip install http://download.gna.org/pychart/PyChart-1.39.tar.gz 'pydot', 'pyparsing < 2', 'pypdf', 'pyserial', 'python-dateutil', 'python-ldap', # optional 'python-openid', 'pytz', 'pyusb >= 1.0.0b1', 'pyyaml', 'qrcode', 'reportlab', # windows binary pypi.python.org/pypi/reportlab 'requests', 'simplejson', 'unittest2', 'vatnumber', 'vobject', 'werkzeug', 'xlwt', ], extras_require={ 'SSL': ['pyopenssl'], }, tests_require=[ 'unittest2', 'mock', ], **py2exe_options() )
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "\n", "import os\n", "import re\n", "from glob import glob\n", "from setuptools import find_packages, setup\n", "from os.path import join, dirname\n", "\n", "\n", "execfile(join(dirname(__file__), 'openerp', 'release.py')) # Load release variables\n", "lib_name = 'openerp'\n", "\n", "\n", "def py2exe_datafiles():\n", " data_files = {}\n", " data_files['Microsoft.VC90.CRT'] = glob('C:\\Microsoft.VC90.CRT\\*.*')\n", "\n", " for root, dirnames, filenames in os.walk('openerp'):\n", " for filename in filenames:\n", " if not re.match(r'.*(\\.pyc|\\.pyo|\\~)$', filename):\n", " data_files.setdefault(root, []).append(join(root, filename))\n", "\n", " import babel\n", " data_files['babel/localedata'] = glob(join(dirname(babel.__file__), 'localedata', '*'))\n", " others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']\n", " data_files['babel'] = map(lambda f: join(dirname(babel.__file__), f), others)\n", " others = ['frontend.py', 'mofile.py']\n", " data_files['babel/messages'] = map(lambda f: join(dirname(babel.__file__), 'messages', f), others)\n", "\n", " import pytz\n", " tzdir = dirname(pytz.__file__)\n", " for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')):\n", " base = join('pytz', root[len(tzdir) + 1:])\n", " data_files[base] = [join(root, f) for f in filenames]\n", "\n", " import docutils\n", " dudir = dirname(docutils.__file__)\n", " for root, _, filenames in os.walk(dudir):\n", " base = join('docutils', root[len(dudir) + 1:])\n", " data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]\n", "\n", " import passlib\n", " pl = dirname(passlib.__file__)\n", " for root, _, filenames in os.walk(pl):\n", " base = join('passlib', root[len(pl) + 1:])\n", " data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]\n", "\n", " return data_files.items()\n", "\n", "\n", "def py2exe_options():\n", " if os.name == 'nt':\n", " import py2exe\n", " return {\n", " 'console': [\n", " {'script': 'odoo.py'},\n", " {'script': 'openerp-gevent'},\n", " {'script': 'openerp-server', 'icon_resources': [\n", " (1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico'))\n", " ]},\n", " ],\n", " 'options': {\n", " 'py2exe': {\n", " 'skip_archive': 1,\n", " 'optimize': 0, # Keep the assert running as the integrated tests rely on them.\n", " 'dist_dir': 'dist',\n", " 'packages': [\n", " 'asynchat', 'asyncore',\n", " 'commands',\n", " 'dateutil',\n", " 'decimal',\n", " 'decorator',\n", " 'docutils',\n", " 'email',\n", " 'encodings',\n", " 'HTMLParser',\n", " 'imaplib',\n", " 'jinja2',\n", " 'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify',\n", " 'mako',\n", " 'markupsafe',\n", " 'mock',\n", " 'openerp',\n", " 'openid',\n", " 'passlib',\n", " 'PIL',\n", " 'poplib',\n", " 'psutil',\n", " 'pychart',\n", " 'pydot',\n", " 'pyparsing',\n", " 'pyPdf',\n", " 'pytz',\n", " 'reportlab',\n", " 'requests',\n", " 'select',\n", " 'simplejson',\n", " 'smtplib',\n", " 'uuid',\n", " 'vatnumber',\n", " 'vobject',\n", " 'win32service', 'win32serviceutil',\n", " 'xlwt',\n", " 'xml', 'xml.dom',\n", " 'yaml',\n", " ],\n", " 'excludes': ['Tkconstants', 'Tkinter', 'tcl'],\n", " }\n", " },\n", " 'data_files': py2exe_datafiles()\n", " }\n", " else:\n", " return {}\n", "\n", "\n", "setup(\n", " name='odoo',\n", " version=version,\n", " description=description,\n", " long_description=long_desc,\n", " url=url,\n", " author=author,\n", " 
author_email=author_email,\n", " classifiers=filter(None, classifiers.split('\\n')),\n", " license=license,\n", " scripts=['openerp-server', 'odoo.py'],\n", " packages=find_packages(),\n", " package_dir={'%s' % lib_name: 'openerp'},\n", " include_package_data=True,\n", " dependency_links=['http://download.gna.org/pychart/'],\n", " install_requires=[\n", " 'babel >= 1.0',\n", " 'decorator',\n", " 'docutils',\n", " 'feedparser',\n", " 'gevent',\n", " 'Jinja2',\n", " 'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/\n", " 'mako',\n", " 'mock',\n", " 'passlib',\n", " 'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/\n", " 'psutil', # windows binary code.google.com/p/psutil/downloads/list\n", " 'psycogreen',\n", " 'psycopg2 >= 2.2',\n", " 'pychart', # not on pypi, use: pip install http://download.gna.org/pychart/PyChart-1.39.tar.gz\n", " 'pydot',\n", " 'pyparsing < 2',\n", " 'pypdf',\n", " 'pyserial',\n", " 'python-dateutil',\n", " 'python-ldap', # optional\n", " 'python-openid',\n", " 'pytz',\n", " 'pyusb >= 1.0.0b1',\n", " 'pyyaml',\n", " 'qrcode',\n", " 'reportlab', # windows binary pypi.python.org/pypi/reportlab\n", " 'requests',\n", " 'simplejson',\n", " 'unittest2',\n", " 'vatnumber',\n", " 'vobject',\n", " 'werkzeug',\n", " 'xlwt',\n", " ],\n", " extras_require={\n", " 'SSL': ['pyopenssl'],\n", " },\n", " tests_require=[\n", " 'unittest2',\n", " 'mock',\n", " ],\n", " **py2exe_options()\n", ")\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0.0273972602739726, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0.012195121951219513, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009615384615384616, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
176
0.000748
from __future__ import print_function
import threading
import time
import sys
import Pyro4


if sys.version_info < (3, 0):
    input = raw_input


def get_user_token():
    return "user123"


class CustomAnnotationProxy(Pyro4.Proxy):
    # override the method that adds annotations and add our own custom user token annotation
    def _pyroAnnotations(self):
        return {"USER": get_user_token().encode("utf-8")}


class DbAccessor(threading.Thread):
    def __init__(self, uri):
        super(DbAccessor, self).__init__()
        self.proxy = CustomAnnotationProxy(uri)
        self.daemon = True

    def run(self):
        for i in range(3):
            try:
                self.proxy.store("number", 100+i)
                num = self.proxy.retrieve("number")
                print("[%s] num=%s" % (self.name, num))
            except Exception:
                import traceback
                traceback.print_exc()


print("\n***** Sequential access using multiple proxies on the Session-Bound Database... (no issues)")
with CustomAnnotationProxy("PYRONAME:example.usersession.sessiondb") as p1,\
        CustomAnnotationProxy("PYRONAME:example.usersession.sessiondb") as p2:
    p1.store("number", 42)
    p1.retrieve("number")
    p2.store("number", 43)
    p2.retrieve("number")

print("\n***** Sequential access using multiple proxies on the Singleton Database... (no issues)")
with CustomAnnotationProxy("PYRONAME:example.usersession.singletondb") as p1,\
        CustomAnnotationProxy("PYRONAME:example.usersession.singletondb") as p2:
    p1.store("number", 42)
    p1.retrieve("number")
    p2.store("number", 43)
    p2.retrieve("number")

print("\n***** Multiple concurrent proxies on the Session-Bound Database... (no issues)")
input("enter to start: ")
t1 = DbAccessor("PYRONAME:example.usersession.sessiondb")
t2 = DbAccessor("PYRONAME:example.usersession.sessiondb")
t1.start()
t2.start()
time.sleep(1)
t1.join()
t2.join()

print("\n***** Multiple concurrent proxies on the Singleton Database... (threading problem)")
input("enter to start: ")
t1 = DbAccessor("PYRONAME:example.usersession.singletondb")
t2 = DbAccessor("PYRONAME:example.usersession.singletondb")
t1.start()
t2.start()
time.sleep(1)
t1.join()
t2.join()
[ "from __future__ import print_function\n", "import threading\n", "import time\n", "import sys\n", "import Pyro4\n", "\n", "\n", "if sys.version_info < (3, 0):\n", " input = raw_input\n", "\n", "\n", "def get_user_token():\n", " return \"user123\"\n", "\n", "\n", "class CustomAnnotationProxy(Pyro4.Proxy):\n", " # override the method that adds annotations and add our own custom user token annotation\n", " def _pyroAnnotations(self):\n", " return {\"USER\": get_user_token().encode(\"utf-8\")}\n", "\n", "\n", "class DbAccessor(threading.Thread):\n", " def __init__(self, uri):\n", " super(DbAccessor, self).__init__()\n", " self.proxy = CustomAnnotationProxy(uri)\n", " self.daemon = True\n", "\n", " def run(self):\n", " for i in range(3):\n", " try:\n", " self.proxy.store(\"number\", 100+i)\n", " num = self.proxy.retrieve(\"number\")\n", " print(\"[%s] num=%s\" % (self.name, num))\n", " except Exception:\n", " import traceback\n", " traceback.print_exc()\n", "\n", "\n", "print(\"\\n***** Sequential access using multiple proxies on the Session-Bound Database... (no issues)\")\n", "with CustomAnnotationProxy(\"PYRONAME:example.usersession.sessiondb\") as p1,\\\n", " CustomAnnotationProxy(\"PYRONAME:example.usersession.sessiondb\") as p2:\n", " p1.store(\"number\", 42)\n", " p1.retrieve(\"number\")\n", " p2.store(\"number\", 43)\n", " p2.retrieve(\"number\")\n", "\n", "print(\"\\n***** Sequential access using multiple proxies on the Singleton Database... (no issues)\")\n", "with CustomAnnotationProxy(\"PYRONAME:example.usersession.singletondb\") as p1,\\\n", " CustomAnnotationProxy(\"PYRONAME:example.usersession.singletondb\") as p2:\n", " p1.store(\"number\", 42)\n", " p1.retrieve(\"number\")\n", " p2.store(\"number\", 43)\n", " p2.retrieve(\"number\")\n", "\n", "print(\"\\n***** Multiple concurrent proxies on the Session-Bound Database... (no issues)\")\n", "input(\"enter to start: \")\n", "t1 = DbAccessor(\"PYRONAME:example.usersession.sessiondb\")\n", "t2 = DbAccessor(\"PYRONAME:example.usersession.sessiondb\")\n", "t1.start()\n", "t2.start()\n", "time.sleep(1)\n", "t1.join()\n", "t2.join()\n", "\n", "print(\"\\n***** Multiple concurrent proxies on the Singleton Database... (threading problem)\")\n", "input(\"enter to start: \")\n", "t1 = DbAccessor(\"PYRONAME:example.usersession.singletondb\")\n", "t2 = DbAccessor(\"PYRONAME:example.usersession.singletondb\")\n", "t1.start()\n", "t2.start()\n", "time.sleep(1)\n", "t1.join()\n", "t2.join()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0 ]
73
0.000886
from sklearn.cluster import SpectralClustering from sklearn.cluster import KMeans as Kmeans_st # from sklearn.cluster import KMeans_st as Kmeans_st from emhc import EMHC from smdp import SMDP import numpy as np import common from digraph import draw_transition_table def perpare_features(self, n_features=3): data = np.zeros(shape=(self.global_feats['tsne'].shape[0],n_features)) data[:,0:2] = self.global_feats['tsne'] data[:,2] = self.global_feats['value'] # data[:,3] = self.global_feats['time'] # data[:,4] = self.global_feats['termination'] # data[:,5] = self.global_feats['tsne3d_norm'] # data[:,6] = self.hand_craft_feats['missing_bricks'] # data[:,6] = self.hand_craft_feats['hole'] # data[:,7] = self.hand_craft_feats['racket'] # data[:,8] = self.hand_craft_feats['ball_dir'] # data[:,9] = self.hand_craft_feats['traj'] # data[:,9:11] = self.hand_craft_feats['ball_pos'] data[np.isnan(data)] = 0 # 1.2 data standartization # scaler = preprocessing.StandardScaler(with_centering=False).fit(data) # data = scaler.fit_transform(data) # data_mean = data.mean(axis=0) # data -= data_mean return data def clustering_(self, plt, n_points=None, force=0): if n_points==None: n_points = self.global_feats['termination'].shape[0] if self.clustering_labels is not None: self.tsne_scat.set_array(self.clustering_labels.astype(np.float32)/self.clustering_labels.max()) draw_transition_table(transition_table=self.smdp.P, cluster_centers=self.cluster_centers, meanscreen=self.meanscreen, tsne=self.global_feats['tsne'], color=self.color, black_edges=self.smdp.edges) plt.show() if force==0: return n_clusters = self.cluster_params['n_clusters'] W = self.cluster_params['window_size'] n_iters = self.cluster_params['n_iters'] entropy_iters = self.cluster_params['entropy_iters'] # slice data by given indices term = self.global_feats['termination'][:n_points] reward = self.global_feats['reward'][:n_points] value = self.global_feats['value'][:n_points] tsne = self.global_feats['tsne'][:n_points] traj_ids = self.hand_craft_feats['traj'][:n_points] # 1. create data for clustering data = perpare_features(self) data = data[:n_points] data_scale = data.max(axis=0) data /= data_scale # 2. 
Build cluster model # 2.1 spatio-temporal K-means if self.cluster_params['method'] == 0: windows_vec = np.arange(start=W,stop=W+1,step=1) clusters_vec = np.arange(start=n_clusters,stop=n_clusters+1,step=1) models_vec = [] scores = np.zeros(shape=(len(clusters_vec),1)) for i,n_w in enumerate(windows_vec): for j,n_c in enumerate(clusters_vec): cluster_model = Kmeans_st(n_clusters=n_clusters,window_size=n_w,n_jobs=8,n_init=n_iters,entropy_iters=entropy_iters) cluster_model.fit(data, rewards=reward, termination=term, values=value) labels = cluster_model.labels_ models_vec.append(cluster_model.smdp) scores[j] = cluster_model.smdp.score print 'window size: %d , Value mse: %f' % (n_w, cluster_model.smdp.score) best = np.argmin(scores) self.cluster_params['n_clusters'] +=best self.smdp = models_vec[best] # 2.1 Spectral clustering elif self.cluster_params['method'] == 1: import scipy.spatial.distance import scipy.sparse dists = scipy.spatial.distance.pdist(tsne, 'euclidean') similarity = np.exp(-dists/10) similarity[similarity<1e-2] = 0 print 'Created similarity matrix' affine_mat = scipy.spatial.distance.squareform(similarity) cluster_model = SpectralClustering(n_clusters=n_clusters,affinity='precomputed') labels = cluster_model.fit_predict(affine_mat) # 2.2 EMHC elif self.cluster_params['method'] == 2: # cluster with k means down to n_clusters + D n_clusters_ = n_clusters + 5 kmeans_st_model = Kmeans_st(n_clusters=n_clusters_,window_size=W,n_jobs=8,n_init=n_iters,entropy_iters=entropy_iters, random_state=123) kmeans_st_model.fit(data, rewards=reward, termination=term, values=value) cluster_model = EMHC(X=data, labels=kmeans_st_model.labels_, termination=term, min_clusters=n_clusters, max_entropy=np.inf) cluster_model.fit() labels = cluster_model.labels_ self.smdp = SMDP(labels=labels, termination=term, rewards=reward, values=value, n_clusters=n_clusters) self.smdp.complete_smdp() self.clustering_labels = self.smdp.labels common.create_trajectory_data(self, reward, traj_ids) self.state_pi_correlation = common.reward_policy_correlation(self.traj_list, self.smdp.greedy_policy, self.smdp) top_greedy_vec = [] bottom_greedy_vec = [] max_diff = 0 best_d = 1 for i,d in enumerate(xrange(1,30)): tb_trajs_discr = common.extermum_trajs_discrepency(self.traj_list, self.clustering_labels, term, reward, value, self.smdp.n_clusters, self.smdp.greedy_policy, d=d) top_greedy_vec.append([i,tb_trajs_discr['top_greedy_sum']]) bottom_greedy_vec.append([i,tb_trajs_discr['bottom_greedy_sum']]) diff_i = tb_trajs_discr['top_greedy_sum'] - tb_trajs_discr['bottom_greedy_sum'] if diff_i > max_diff: max_diff = diff_i best_d = d self.tb_trajs_discr = common.extermum_trajs_discrepency(self.traj_list, self.clustering_labels, term, reward, value, self.smdp.n_clusters, self.smdp.greedy_policy, d=best_d) self.top_greedy_vec = top_greedy_vec self.bottom_greedy_vec = bottom_greedy_vec common.draw_skills(self,self.smdp.n_clusters,plt) # 4. 
collect statistics cluster_centers = cluster_model.cluster_centers_ cluster_centers *= data_scale screen_size = self.screens.shape meanscreen = np.zeros(shape=(n_clusters,screen_size[1],screen_size[2],screen_size[3])) cluster_time = np.zeros(shape=(n_clusters,1)) width = int(np.floor(np.sqrt(n_clusters))) length = int(n_clusters/width) # f, ax = plt.subplots(length,width) for cluster_ind in range(n_clusters): indices = (labels==cluster_ind) cluster_data = data[indices] cluster_time[cluster_ind] = np.mean(self.global_feats['time'][indices]) meanscreen[cluster_ind,:,:,:] = common.calc_cluster_im(self,indices) # 5. draw cluster indices plt.figure(self.fig.number) data *= data_scale for i in range(n_clusters): self.ax_tsne.annotate(i, xy=cluster_centers[i,0:2], size=20, color='r') draw_transition_table(transition_table=self.smdp.P, cluster_centers=cluster_centers, meanscreen=meanscreen, tsne=data[:,0:2], color=self.color, black_edges=self.smdp.edges) self.cluster_centers = cluster_centers self.meanscreen =meanscreen self.cluster_time =cluster_time common.visualize(self) def update_slider(self, name, slider): def f(): setattr(self, name, slider.val) return f
[ "from sklearn.cluster import SpectralClustering\n", "from sklearn.cluster import KMeans as Kmeans_st\n", "# from sklearn.cluster import KMeans_st as Kmeans_st\n", "\n", "from emhc import EMHC\n", "from smdp import SMDP\n", "import numpy as np\n", "import common\n", "from digraph import draw_transition_table\n", "\n", "def perpare_features(self, n_features=3):\n", "\n", " data = np.zeros(shape=(self.global_feats['tsne'].shape[0],n_features))\n", " data[:,0:2] = self.global_feats['tsne']\n", " data[:,2] = self.global_feats['value']\n", " # data[:,3] = self.global_feats['time']\n", " # data[:,4] = self.global_feats['termination']\n", " # data[:,5] = self.global_feats['tsne3d_norm']\n", " # data[:,6] = self.hand_craft_feats['missing_bricks']\n", " # data[:,6] = self.hand_craft_feats['hole']\n", " # data[:,7] = self.hand_craft_feats['racket']\n", " # data[:,8] = self.hand_craft_feats['ball_dir']\n", " # data[:,9] = self.hand_craft_feats['traj']\n", " # data[:,9:11] = self.hand_craft_feats['ball_pos']\n", " data[np.isnan(data)] = 0\n", " # 1.2 data standartization\n", " # scaler = preprocessing.StandardScaler(with_centering=False).fit(data)\n", " # data = scaler.fit_transform(data)\n", "\n", " # data_mean = data.mean(axis=0)\n", " # data -= data_mean\n", " return data\n", "\n", "def clustering_(self, plt, n_points=None, force=0):\n", "\n", " if n_points==None:\n", " n_points = self.global_feats['termination'].shape[0]\n", "\n", " if self.clustering_labels is not None:\n", " self.tsne_scat.set_array(self.clustering_labels.astype(np.float32)/self.clustering_labels.max())\n", " draw_transition_table(transition_table=self.smdp.P, cluster_centers=self.cluster_centers,\n", " meanscreen=self.meanscreen, tsne=self.global_feats['tsne'], color=self.color, black_edges=self.smdp.edges)\n", " plt.show()\n", " if force==0:\n", " return\n", "\n", " n_clusters = self.cluster_params['n_clusters']\n", " W = self.cluster_params['window_size']\n", " n_iters = self.cluster_params['n_iters']\n", " entropy_iters = self.cluster_params['entropy_iters']\n", "\n", " # slice data by given indices\n", " term = self.global_feats['termination'][:n_points]\n", " reward = self.global_feats['reward'][:n_points]\n", " value = self.global_feats['value'][:n_points]\n", " tsne = self.global_feats['tsne'][:n_points]\n", " traj_ids = self.hand_craft_feats['traj'][:n_points]\n", "\n", " # 1. create data for clustering\n", " data = perpare_features(self)\n", " data = data[:n_points]\n", " data_scale = data.max(axis=0)\n", " data /= data_scale\n", "\n", " # 2. 
Build cluster model\n", " # 2.1 spatio-temporal K-means\n", " if self.cluster_params['method'] == 0:\n", " windows_vec = np.arange(start=W,stop=W+1,step=1)\n", " clusters_vec = np.arange(start=n_clusters,stop=n_clusters+1,step=1)\n", " models_vec = []\n", " scores = np.zeros(shape=(len(clusters_vec),1))\n", " for i,n_w in enumerate(windows_vec):\n", " for j,n_c in enumerate(clusters_vec):\n", " cluster_model = Kmeans_st(n_clusters=n_clusters,window_size=n_w,n_jobs=8,n_init=n_iters,entropy_iters=entropy_iters)\n", " cluster_model.fit(data, rewards=reward, termination=term, values=value)\n", " labels = cluster_model.labels_\n", " models_vec.append(cluster_model.smdp)\n", " scores[j] = cluster_model.smdp.score\n", " print 'window size: %d , Value mse: %f' % (n_w, cluster_model.smdp.score)\n", " best = np.argmin(scores)\n", " self.cluster_params['n_clusters'] +=best\n", " self.smdp = models_vec[best]\n", "\n", " # 2.1 Spectral clustering\n", " elif self.cluster_params['method'] == 1:\n", " import scipy.spatial.distance\n", " import scipy.sparse\n", " dists = scipy.spatial.distance.pdist(tsne, 'euclidean')\n", " similarity = np.exp(-dists/10)\n", " similarity[similarity<1e-2] = 0\n", " print 'Created similarity matrix'\n", " affine_mat = scipy.spatial.distance.squareform(similarity)\n", " cluster_model = SpectralClustering(n_clusters=n_clusters,affinity='precomputed')\n", " labels = cluster_model.fit_predict(affine_mat)\n", "\n", " # 2.2 EMHC\n", " elif self.cluster_params['method'] == 2:\n", " # cluster with k means down to n_clusters + D\n", " n_clusters_ = n_clusters + 5\n", " kmeans_st_model = Kmeans_st(n_clusters=n_clusters_,window_size=W,n_jobs=8,n_init=n_iters,entropy_iters=entropy_iters, random_state=123)\n", " kmeans_st_model.fit(data, rewards=reward, termination=term, values=value)\n", " cluster_model = EMHC(X=data, labels=kmeans_st_model.labels_, termination=term, min_clusters=n_clusters, max_entropy=np.inf)\n", " cluster_model.fit()\n", " labels = cluster_model.labels_\n", " self.smdp = SMDP(labels=labels, termination=term, rewards=reward, values=value, n_clusters=n_clusters)\n", "\n", " self.smdp.complete_smdp()\n", " self.clustering_labels = self.smdp.labels\n", " common.create_trajectory_data(self, reward, traj_ids)\n", " self.state_pi_correlation = common.reward_policy_correlation(self.traj_list, self.smdp.greedy_policy, self.smdp)\n", "\n", " top_greedy_vec = []\n", " bottom_greedy_vec = []\n", " max_diff = 0\n", " best_d = 1\n", " for i,d in enumerate(xrange(1,30)):\n", " tb_trajs_discr = common.extermum_trajs_discrepency(self.traj_list, self.clustering_labels, term, reward, value, self.smdp.n_clusters, self.smdp.greedy_policy, d=d)\n", " top_greedy_vec.append([i,tb_trajs_discr['top_greedy_sum']])\n", " bottom_greedy_vec.append([i,tb_trajs_discr['bottom_greedy_sum']])\n", " diff_i = tb_trajs_discr['top_greedy_sum'] - tb_trajs_discr['bottom_greedy_sum']\n", " if diff_i > max_diff:\n", " max_diff = diff_i\n", " best_d = d\n", "\n", " self.tb_trajs_discr = common.extermum_trajs_discrepency(self.traj_list, self.clustering_labels, term, reward, value, self.smdp.n_clusters, self.smdp.greedy_policy, d=best_d)\n", " self.top_greedy_vec = top_greedy_vec\n", " self.bottom_greedy_vec = bottom_greedy_vec\n", "\n", " common.draw_skills(self,self.smdp.n_clusters,plt)\n", "\n", "\n", " # 4. 
collect statistics\n", " cluster_centers = cluster_model.cluster_centers_\n", " cluster_centers *= data_scale\n", "\n", " screen_size = self.screens.shape\n", " meanscreen = np.zeros(shape=(n_clusters,screen_size[1],screen_size[2],screen_size[3]))\n", " cluster_time = np.zeros(shape=(n_clusters,1))\n", " width = int(np.floor(np.sqrt(n_clusters)))\n", " length = int(n_clusters/width)\n", " # f, ax = plt.subplots(length,width)\n", "\n", " for cluster_ind in range(n_clusters):\n", " indices = (labels==cluster_ind)\n", " cluster_data = data[indices]\n", " cluster_time[cluster_ind] = np.mean(self.global_feats['time'][indices])\n", " meanscreen[cluster_ind,:,:,:] = common.calc_cluster_im(self,indices)\n", "\n", " # 5. draw cluster indices\n", " plt.figure(self.fig.number)\n", " data *= data_scale\n", " for i in range(n_clusters):\n", " self.ax_tsne.annotate(i, xy=cluster_centers[i,0:2], size=20, color='r')\n", " draw_transition_table(transition_table=self.smdp.P, cluster_centers=cluster_centers,\n", " meanscreen=meanscreen, tsne=data[:,0:2], color=self.color, black_edges=self.smdp.edges)\n", "\n", " self.cluster_centers = cluster_centers\n", " self.meanscreen =meanscreen\n", " self.cluster_time =cluster_time\n", " common.visualize(self)\n", "\n", "def update_slider(self, name, slider):\n", " def f():\n", " setattr(self, name, slider.val)\n", " return f\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0.013333333333333334, 0.022727272727272728, 0.023255813953488372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0, 0.08695652173913043, 0, 0, 0, 0.009523809523809525, 0.01020408163265306, 0.015037593984962405, 0, 0.047619047619047616, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03508771929824561, 0.02631578947368421, 0, 0.01818181818181818, 0.022222222222222223, 0.02, 0.03759398496240601, 0.011363636363636364, 0, 0, 0, 0.011111111111111112, 0, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0.025, 0, 0, 0.02247191011235955, 0, 0, 0, 0, 0, 0, 0.034722222222222224, 0.012195121951219513, 0.007575757575757576, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0.05, 0.005813953488372093, 0.014705882352941176, 0.013513513513513514, 0.011363636363636364, 0, 0, 0, 0, 0.0056179775280898875, 0, 0, 0, 0.037037037037037035, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0.05434782608695652, 0.02, 0, 0, 0, 0, 0, 0.025, 0, 0, 0.05194805194805195, 0, 0, 0, 0, 0, 0.0125, 0.011235955056179775, 0.017543859649122806, 0, 0, 0.03125, 0.027777777777777776, 0, 0, 0.02564102564102564, 0, 0, 0 ]
num_lines: 165
avg_score: 0.006321
# -*- coding:utf8 -*- # File : initializer.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 5/25/17 # # This file is part of TensorArtist. from tensorflow.python.ops import init_ops __all__ = [ 'zeros_initializer', 'ones_initializer', 'constant_initializer', 'random_uniform_initializer', 'random_normal_initializer', 'truncated_normal_initializer', 'uniform_unit_scaling_initializer', 'variance_scaling_initializer', 'orthogonal_initializer' ] zeros_initializer = init_ops.zeros_initializer ones_initializer = init_ops.ones_initializer constant_initializer = init_ops.constant_initializer random_uniform_initializer = init_ops.random_uniform_initializer random_normal_initializer = init_ops.random_normal_initializer truncated_normal_initializer = init_ops.truncated_normal_initializer uniform_unit_scaling_initializer = init_ops.uniform_unit_scaling_initializer variance_scaling_initializer = init_ops.variance_scaling_initializer orthogonal_initializer = init_ops.orthogonal_initializer
[ "# -*- coding:utf8 -*-\n", "# File : initializer.py\n", "# Author : Jiayuan Mao\n", "# Email : maojiayuan@gmail.com\n", "# Date : 5/25/17\n", "# \n", "# This file is part of TensorArtist.\n", "\n", "\n", "from tensorflow.python.ops import init_ops\n", "\n", "__all__ = [\n", " 'zeros_initializer', 'ones_initializer', 'constant_initializer',\n", " 'random_uniform_initializer', 'random_normal_initializer', 'truncated_normal_initializer',\n", " 'uniform_unit_scaling_initializer', 'variance_scaling_initializer',\n", " 'orthogonal_initializer'\n", "]\n", "\n", "zeros_initializer = init_ops.zeros_initializer\n", "ones_initializer = init_ops.ones_initializer\n", "constant_initializer = init_ops.constant_initializer\n", "random_uniform_initializer = init_ops.random_uniform_initializer\n", "random_normal_initializer = init_ops.random_normal_initializer\n", "truncated_normal_initializer = init_ops.truncated_normal_initializer\n", "uniform_unit_scaling_initializer = init_ops.uniform_unit_scaling_initializer\n", "variance_scaling_initializer = init_ops.variance_scaling_initializer\n", "orthogonal_initializer = init_ops.orthogonal_initializer\n" ]
[ 0, 0, 0, 0, 0, 0.3333333333333333, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 27
avg_score: 0.012736
# # Vortex OpenSplice # # This software and documentation are Copyright 2006 to TO_YEAR ADLINK # Technology Limited, its affiliated companies and licensors. All rights # reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys import warnings from re import search from subprocess import check_call from setuptools import setup from setuptools.extension import Extension from Cython.Build import cythonize if not os.getenv('OSPL_HOME'): raise Exception('Environment variable OSPL_HOME not defined.') if os.getenv('SPLICE_TARGET'): version = os.environ['PACKAGE_VERSION'] idl_path = os.environ['OSPL_HOME_NORMALIZED']+'/etc/idl/dds_builtinTopics.idl' LDFLAGS=[os.environ['OSPL_HOME_NORMALIZED']+'/lib/'+os.environ['SPLICE_TARGET']] CINCS=[os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/c99/include', os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/sac/include', os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/c99/bld/'+os.environ['SPLICE_TARGET'], os.environ['OSPL_HOME_NORMALIZED']+'/src/abstraction/os/include', # following includes needed for builtintopics files os.environ['OSPL_HOME_NORMALIZED']+'/src/user/include', os.environ['OSPL_HOME_NORMALIZED']+'/src/kernel/include', os.environ['OSPL_HOME_NORMALIZED']+'/src/osplcore/bld/'+os.environ['SPLICE_TARGET'], os.environ['OSPL_HOME_NORMALIZED']+'/src/database/database/include'] else: with open(os.environ['OSPL_HOME']+'/etc/RELEASEINFO') as f: version = search('PACKAGE_VERSION=(.*)\n', f.read()).group(1) idl_path = os.environ['OSPL_HOME']+'/etc/idl/dds_builtinTopics.idl' LDFLAGS=[os.environ['OSPL_HOME']+'/lib'] CINCS=[os.environ['OSPL_HOME']+'/include/dcps/C/C99', os.environ['OSPL_HOME']+'/include/dcps/C/SAC', os.environ['OSPL_HOME']+'/include/sys'] print('Executing idlpp') check_call(["idlpp", "-l", "c99", idl_path]) print('idlpp success') extensions = [Extension('dds', ['dds.pyx', 'dds_builtinTopicsDcps.c', 'dds_builtinTopicsSacDcps.c', 'dds_builtinTopicsSplDcps.c'], libraries=['dcpsc99','dcpssac','ddskernel'], library_dirs=LDFLAGS, include_dirs=CINCS, extra_compile_args=['-DOSPL_BUILD_DCPSSAC'])] extensions = cythonize(extensions) with warnings.catch_warnings(): # On Windows, running bdist_wheel throws RuntimeWarnings about Config variables not being set, # claiming Python ABI tag may be incorrect. if 'bdist_wheel' in sys.argv: warnings.filterwarnings('ignore', '(.*Py_DEBUG|.*WITH_PYMALLOC)', RuntimeWarning) setup( name = 'dds', version = version, description = 'A python implementation of the OMG DDS Data-Centric Publish-Subscribe (DCPS) API, ' 'for use with Vortex OpenSplice DDS middleware.', url = 'http://vortex.adlinktech.com', author = 'ADLINK Technology', author_email = 'ist_support@adlinktech.com', py_modules = ['ddsutil'], ext_modules = extensions )
[ "#\n", "# Vortex OpenSplice\n", "#\n", "# This software and documentation are Copyright 2006 to TO_YEAR ADLINK\n", "# Technology Limited, its affiliated companies and licensors. All rights\n", "# reserved.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "#\n", "import os\n", "import sys\n", "import warnings\n", "from re import search\n", "from subprocess import check_call\n", "from setuptools import setup\n", "from setuptools.extension import Extension\n", "from Cython.Build import cythonize\n", "\n", "if not os.getenv('OSPL_HOME'):\n", " raise Exception('Environment variable OSPL_HOME not defined.')\n", "\n", "if os.getenv('SPLICE_TARGET'):\n", " version = os.environ['PACKAGE_VERSION']\n", " idl_path = os.environ['OSPL_HOME_NORMALIZED']+'/etc/idl/dds_builtinTopics.idl'\n", " LDFLAGS=[os.environ['OSPL_HOME_NORMALIZED']+'/lib/'+os.environ['SPLICE_TARGET']]\n", " CINCS=[os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/c99/include',\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/sac/include',\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/api/dcps/c99/bld/'+os.environ['SPLICE_TARGET'],\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/abstraction/os/include',\n", " # following includes needed for builtintopics files\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/user/include',\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/kernel/include',\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/osplcore/bld/'+os.environ['SPLICE_TARGET'],\n", " os.environ['OSPL_HOME_NORMALIZED']+'/src/database/database/include']\n", "else:\n", " with open(os.environ['OSPL_HOME']+'/etc/RELEASEINFO') as f:\n", " version = search('PACKAGE_VERSION=(.*)\\n', f.read()).group(1)\n", " idl_path = os.environ['OSPL_HOME']+'/etc/idl/dds_builtinTopics.idl'\n", " LDFLAGS=[os.environ['OSPL_HOME']+'/lib']\n", " CINCS=[os.environ['OSPL_HOME']+'/include/dcps/C/C99',\n", " os.environ['OSPL_HOME']+'/include/dcps/C/SAC',\n", " os.environ['OSPL_HOME']+'/include/sys']\n", "\n", "print('Executing idlpp')\n", "check_call([\"idlpp\", \"-l\", \"c99\", idl_path])\n", "print('idlpp success')\n", "\n", "extensions = [Extension('dds',\n", " ['dds.pyx',\n", " 'dds_builtinTopicsDcps.c',\n", " 'dds_builtinTopicsSacDcps.c',\n", " 'dds_builtinTopicsSplDcps.c'],\n", " libraries=['dcpsc99','dcpssac','ddskernel'],\n", " library_dirs=LDFLAGS,\n", " include_dirs=CINCS,\n", " extra_compile_args=['-DOSPL_BUILD_DCPSSAC'])]\n", "\n", "extensions = cythonize(extensions)\n", "\n", "with warnings.catch_warnings():\n", " # On Windows, running bdist_wheel throws RuntimeWarnings about Config variables not being set,\n", " # claiming Python ABI tag may be incorrect.\n", " if 'bdist_wheel' in sys.argv:\n", " warnings.filterwarnings('ignore', '(.*Py_DEBUG|.*WITH_PYMALLOC)', RuntimeWarning)\n", " setup(\n", " name = 'dds',\n", " version = version,\n", " description = 'A python implementation of the OMG DDS Data-Centric Publish-Subscribe (DCPS) API, '\n", " 'for use with Vortex OpenSplice DDS middleware.',\n", " 
url = 'http://vortex.adlinktech.com',\n", " author = 'ADLINK Technology',\n", " author_email = 'ist_support@adlinktech.com',\n", " py_modules = ['ddsutil'],\n", " ext_modules = extensions\n", " )\n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0.023529411764705882, 0.013333333333333334, 0.013157894736842105, 0.019801980198019802, 0.01282051282051282, 0.015625, 0, 0, 0.010309278350515464, 0.012345679012345678, 0, 0, 0, 0, 0.022222222222222223, 0.017241379310344827, 0.01694915254237288, 0.019230769230769232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.028985507246376812, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0.011111111111111112, 0, 0.09090909090909091, 0.07407407407407407, 0.028037383177570093, 0, 0.043478260869565216, 0.05263157894736842, 0.03773584905660377, 0.058823529411764705, 0.06060606060606061, 0, 1 ]
num_lines: 86
avg_score: 0.019827
""" Common code shared by course and library fixtures. """ import re import requests import json from lazy import lazy from common.test.acceptance.fixtures import STUDIO_BASE_URL class StudioApiLoginError(Exception): """ Error occurred while logging in to the Studio API. """ pass class StudioApiFixture(object): """ Base class for fixtures that use the Studio restful API. """ def __init__(self): # Info about the auto-auth user used to create the course/library. self.user = {} @lazy def session(self): """ Log in as a staff user, then return a `requests` `session` object for the logged in user. Raises a `StudioApiLoginError` if the login fails. """ # Use auto-auth to retrieve the session for a logged in user session = requests.Session() response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true") # Return the session from the request if response.ok: # auto_auth returns information about the newly created user # capture this so it can be used by by the testcases. user_pattern = re.compile(r'Logged in user {0} \({1}\) with password {2} and user_id {3}'.format( r'(?P<username>\S+)', r'(?P<email>[^\)]+)', r'(?P<password>\S+)', r'(?P<user_id>\d+)')) user_matches = re.match(user_pattern, response.text) if user_matches: self.user = user_matches.groupdict() return session else: msg = "Could not log in to use Studio restful API. Status code: {0}".format(response.status_code) raise StudioApiLoginError(msg) @lazy def session_cookies(self): """ Log in as a staff user, then return the cookies for the session (as a dict) Raises a `StudioApiLoginError` if the login fails. """ return {key: val for key, val in self.session.cookies.items()} @lazy def headers(self): """ Default HTTP headers dict. """ return { 'Content-type': 'application/json', 'Accept': 'application/json', 'X-CSRFToken': self.session_cookies.get('csrftoken', '') } class FixtureError(Exception): """ Error occurred while installing a course or library fixture. """ pass class XBlockContainerFixture(StudioApiFixture): """ Base class for course and library fixtures. """ def __init__(self): self.children = [] super(XBlockContainerFixture, self).__init__() def add_children(self, *args): """ Add children XBlock to the container. Each item in `args` is an `XBlockFixtureDesc` object. Returns the fixture to allow chaining. """ self.children.extend(args) return self def _create_xblock_children(self, parent_loc, xblock_descriptions): """ Recursively create XBlock children. """ for desc in xblock_descriptions: loc = self.create_xblock(parent_loc, desc) self._create_xblock_children(loc, desc.children) def create_xblock(self, parent_loc, xblock_desc): """ Create an XBlock with `parent_loc` (the location of the parent block) and `xblock_desc` (an `XBlockFixtureDesc` instance). """ create_payload = { 'category': xblock_desc.category, 'display_name': xblock_desc.display_name, } if parent_loc is not None: create_payload['parent_locator'] = parent_loc # Create the new XBlock response = self.session.post( STUDIO_BASE_URL + '/xblock/', data=json.dumps(create_payload), headers=self.headers, ) if not response.ok: msg = "Could not create {0}. 
Status was {1}".format(xblock_desc, response.status_code) raise FixtureError(msg) try: loc = response.json().get('locator') xblock_desc.locator = loc except ValueError: raise FixtureError("Could not decode JSON from '{0}'".format(response.content)) # Configure the XBlock response = self.session.post( STUDIO_BASE_URL + '/xblock/' + loc, data=xblock_desc.serialize(), headers=self.headers, ) if response.ok: return loc else: raise FixtureError("Could not update {0}. Status code: {1}".format(xblock_desc, response.status_code)) def _update_xblock(self, locator, data): """ Update the xblock at `locator`. """ # Create the new XBlock response = self.session.put( "{}/xblock/{}".format(STUDIO_BASE_URL, locator), data=json.dumps(data), headers=self.headers, ) if not response.ok: msg = "Could not update {} with data {}. Status was {}".format(locator, data, response.status_code) raise FixtureError(msg) def _encode_post_dict(self, post_dict): """ Encode `post_dict` (a dictionary) as UTF-8 encoded JSON. """ return json.dumps({ k: v.encode('utf-8') if isinstance(v, basestring) else v for k, v in post_dict.items() }) def get_nested_xblocks(self, category=None): """ Return a list of nested XBlocks for the container that can be filtered by category. """ xblocks = self._get_nested_xblocks(self) if category: xblocks = [x for x in xblocks if x.category == category] return xblocks def _get_nested_xblocks(self, xblock_descriptor): """ Return a list of nested XBlocks for the container. """ xblocks = list(xblock_descriptor.children) for child in xblock_descriptor.children: xblocks.extend(self._get_nested_xblocks(child)) return xblocks def _publish_xblock(self, locator): """ Publish the xblock at `locator`. """ self._update_xblock(locator, {'publish': 'make_public'})
[ "\"\"\"\n", "Common code shared by course and library fixtures.\n", "\"\"\"\n", "import re\n", "import requests\n", "import json\n", "from lazy import lazy\n", "\n", "from common.test.acceptance.fixtures import STUDIO_BASE_URL\n", "\n", "\n", "class StudioApiLoginError(Exception):\n", " \"\"\"\n", " Error occurred while logging in to the Studio API.\n", " \"\"\"\n", " pass\n", "\n", "\n", "class StudioApiFixture(object):\n", " \"\"\"\n", " Base class for fixtures that use the Studio restful API.\n", " \"\"\"\n", " def __init__(self):\n", " # Info about the auto-auth user used to create the course/library.\n", " self.user = {}\n", "\n", " @lazy\n", " def session(self):\n", " \"\"\"\n", " Log in as a staff user, then return a `requests` `session` object for the logged in user.\n", " Raises a `StudioApiLoginError` if the login fails.\n", " \"\"\"\n", " # Use auto-auth to retrieve the session for a logged in user\n", " session = requests.Session()\n", " response = session.get(STUDIO_BASE_URL + \"/auto_auth?staff=true\")\n", "\n", " # Return the session from the request\n", " if response.ok:\n", " # auto_auth returns information about the newly created user\n", " # capture this so it can be used by by the testcases.\n", " user_pattern = re.compile(r'Logged in user {0} \\({1}\\) with password {2} and user_id {3}'.format(\n", " r'(?P<username>\\S+)', r'(?P<email>[^\\)]+)', r'(?P<password>\\S+)', r'(?P<user_id>\\d+)'))\n", " user_matches = re.match(user_pattern, response.text)\n", " if user_matches:\n", " self.user = user_matches.groupdict()\n", "\n", " return session\n", "\n", " else:\n", " msg = \"Could not log in to use Studio restful API. Status code: {0}\".format(response.status_code)\n", " raise StudioApiLoginError(msg)\n", "\n", " @lazy\n", " def session_cookies(self):\n", " \"\"\"\n", " Log in as a staff user, then return the cookies for the session (as a dict)\n", " Raises a `StudioApiLoginError` if the login fails.\n", " \"\"\"\n", " return {key: val for key, val in self.session.cookies.items()}\n", "\n", " @lazy\n", " def headers(self):\n", " \"\"\"\n", " Default HTTP headers dict.\n", " \"\"\"\n", " return {\n", " 'Content-type': 'application/json',\n", " 'Accept': 'application/json',\n", " 'X-CSRFToken': self.session_cookies.get('csrftoken', '')\n", " }\n", "\n", "\n", "class FixtureError(Exception):\n", " \"\"\"\n", " Error occurred while installing a course or library fixture.\n", " \"\"\"\n", " pass\n", "\n", "\n", "class XBlockContainerFixture(StudioApiFixture):\n", " \"\"\"\n", " Base class for course and library fixtures.\n", " \"\"\"\n", "\n", " def __init__(self):\n", " self.children = []\n", " super(XBlockContainerFixture, self).__init__()\n", "\n", " def add_children(self, *args):\n", " \"\"\"\n", " Add children XBlock to the container.\n", " Each item in `args` is an `XBlockFixtureDesc` object.\n", "\n", " Returns the fixture to allow chaining.\n", " \"\"\"\n", " self.children.extend(args)\n", " return self\n", "\n", " def _create_xblock_children(self, parent_loc, xblock_descriptions):\n", " \"\"\"\n", " Recursively create XBlock children.\n", " \"\"\"\n", " for desc in xblock_descriptions:\n", " loc = self.create_xblock(parent_loc, desc)\n", " self._create_xblock_children(loc, desc.children)\n", "\n", " def create_xblock(self, parent_loc, xblock_desc):\n", " \"\"\"\n", " Create an XBlock with `parent_loc` (the location of the parent block)\n", " and `xblock_desc` (an `XBlockFixtureDesc` instance).\n", " \"\"\"\n", " create_payload = {\n", " 'category': 
xblock_desc.category,\n", " 'display_name': xblock_desc.display_name,\n", " }\n", "\n", " if parent_loc is not None:\n", " create_payload['parent_locator'] = parent_loc\n", "\n", " # Create the new XBlock\n", " response = self.session.post(\n", " STUDIO_BASE_URL + '/xblock/',\n", " data=json.dumps(create_payload),\n", " headers=self.headers,\n", " )\n", "\n", " if not response.ok:\n", " msg = \"Could not create {0}. Status was {1}\".format(xblock_desc, response.status_code)\n", " raise FixtureError(msg)\n", "\n", " try:\n", " loc = response.json().get('locator')\n", " xblock_desc.locator = loc\n", " except ValueError:\n", " raise FixtureError(\"Could not decode JSON from '{0}'\".format(response.content))\n", "\n", " # Configure the XBlock\n", " response = self.session.post(\n", " STUDIO_BASE_URL + '/xblock/' + loc,\n", " data=xblock_desc.serialize(),\n", " headers=self.headers,\n", " )\n", "\n", " if response.ok:\n", " return loc\n", " else:\n", " raise FixtureError(\"Could not update {0}. Status code: {1}\".format(xblock_desc, response.status_code))\n", "\n", " def _update_xblock(self, locator, data):\n", " \"\"\"\n", " Update the xblock at `locator`.\n", " \"\"\"\n", " # Create the new XBlock\n", " response = self.session.put(\n", " \"{}/xblock/{}\".format(STUDIO_BASE_URL, locator),\n", " data=json.dumps(data),\n", " headers=self.headers,\n", " )\n", "\n", " if not response.ok:\n", " msg = \"Could not update {} with data {}. Status was {}\".format(locator, data, response.status_code)\n", " raise FixtureError(msg)\n", "\n", " def _encode_post_dict(self, post_dict):\n", " \"\"\"\n", " Encode `post_dict` (a dictionary) as UTF-8 encoded JSON.\n", " \"\"\"\n", " return json.dumps({\n", " k: v.encode('utf-8') if isinstance(v, basestring) else v\n", " for k, v in post_dict.items()\n", " })\n", "\n", " def get_nested_xblocks(self, category=None):\n", " \"\"\"\n", " Return a list of nested XBlocks for the container that can be filtered by\n", " category.\n", " \"\"\"\n", " xblocks = self._get_nested_xblocks(self)\n", " if category:\n", " xblocks = [x for x in xblocks if x.category == category]\n", " return xblocks\n", "\n", " def _get_nested_xblocks(self, xblock_descriptor):\n", " \"\"\"\n", " Return a list of nested XBlocks for the container.\n", " \"\"\"\n", " xblocks = list(xblock_descriptor.children)\n", " for child in xblock_descriptor.children:\n", " xblocks.extend(self._get_nested_xblocks(child))\n", " return xblocks\n", "\n", " def _publish_xblock(self, locator):\n", " \"\"\"\n", " Publish the xblock at `locator`.\n", " \"\"\"\n", " self._update_xblock(locator, {'publish': 'make_public'})\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0.009615384615384616, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008620689655172414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 196
avg_score: 0.000512
import os.path import SCons.Builder import SCons.Node.FS import SCons.Util csccom = "$CSC $CSCFLAGS $_CSCLIBPATH -r:$_CSCLIBS -out:${TARGET.abspath} $SOURCES" csclibcom = "$CSC -t:library $CSCLIBFLAGS $_CSCLIBPATH $_CSCLIBS -out:${TARGET.abspath} $SOURCES" McsBuilder = SCons.Builder.Builder(action = '$CSCCOM', source_factory = SCons.Node.FS.default_fs.Entry, suffix = '.exe') McsLibBuilder = SCons.Builder.Builder(action = '$CSCLIBCOM', source_factory = SCons.Node.FS.default_fs.Entry, suffix = '.dll') def generate(env): env['BUILDERS']['CLIProgram'] = McsBuilder env['BUILDERS']['CLILibrary'] = McsLibBuilder env['CSC'] = 'gmcs' env['_CSCLIBS'] = "${_stripixes('-r:', CILLIBS, '', '-r', '', __env__)}" env['_CSCLIBPATH'] = "${_stripixes('-lib:', CILLIBPATH, '', '-r', '', __env__)}" env['CSCFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8') env['CSCLIBFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8') env['CSCCOM'] = SCons.Action.Action(csccom) env['CSCLIBCOM'] = SCons.Action.Action(csclibcom) def exists(env): return internal_zip or env.Detect('gmcs')
[ "import os.path\n", "import SCons.Builder\n", "import SCons.Node.FS\n", "import SCons.Util\n", "\n", "csccom = \"$CSC $CSCFLAGS $_CSCLIBPATH -r:$_CSCLIBS -out:${TARGET.abspath} $SOURCES\"\n", "csclibcom = \"$CSC -t:library $CSCLIBFLAGS $_CSCLIBPATH $_CSCLIBS -out:${TARGET.abspath} $SOURCES\"\n", "\n", "\n", "McsBuilder = SCons.Builder.Builder(action = '$CSCCOM',\n", " source_factory = SCons.Node.FS.default_fs.Entry,\n", " suffix = '.exe')\n", "\n", "McsLibBuilder = SCons.Builder.Builder(action = '$CSCLIBCOM',\n", " source_factory = SCons.Node.FS.default_fs.Entry,\n", " suffix = '.dll')\n", "\n", "def generate(env):\n", " env['BUILDERS']['CLIProgram'] = McsBuilder\n", " env['BUILDERS']['CLILibrary'] = McsLibBuilder\n", "\n", " env['CSC'] = 'gmcs'\n", " env['_CSCLIBS'] = \"${_stripixes('-r:', CILLIBS, '', '-r', '', __env__)}\"\n", " env['_CSCLIBPATH'] = \"${_stripixes('-lib:', CILLIBPATH, '', '-r', '', __env__)}\"\n", " env['CSCFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8')\n", " env['CSCLIBFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8')\n", " env['CSCCOM'] = SCons.Action.Action(csccom)\n", " env['CSCLIBCOM'] = SCons.Action.Action(csclibcom)\n", "\n", "def exists(env):\n", " return internal_zip or env.Detect('gmcs')\n", "\n" ]
[ 0, 0, 0, 0, 0, 0.011904761904761904, 0.01020408163265306, 0, 0, 0.03636363636363636, 0.03571428571428571, 0.038461538461538464, 0, 0.03278688524590164, 0.047619047619047616, 0.057692307692307696, 0, 0.05263157894736842, 0, 0, 0, 0.03125, 0.0125, 0.011764705882352941, 0.012987012987012988, 0, 0.018867924528301886, 0.017857142857142856, 0, 0.058823529411764705, 0, 1 ]
num_lines: 32
avg_score: 0.046482
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import re import time import math from openerp import api, fields as fields2 from openerp import tools from openerp.osv import fields, osv from openerp.tools import float_round, float_is_zero, float_compare import json CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?') class res_currency(osv.osv): def _get_current_rate(self, cr, uid, ids, name, arg, context=None): if context is None: context = {} res = {} date = context.get('date') or fields2.Datetime.now() company_id = context.get('company_id') or self.pool['res.users']._get_company(cr, uid, context=context) for id in ids: cr.execute("""SELECT rate FROM res_currency_rate WHERE currency_id = %s AND name <= %s AND (company_id is null OR company_id = %s) ORDER BY company_id, name desc LIMIT 1""", (id, date, company_id)) if cr.rowcount: res[id] = cr.fetchone()[0] else: res[id] = 1 return res def _decimal_places(self, cr, uid, ids, name, arg, context=None): res = {} for id in ids: rounding = self.browse(cr, uid, id, context=context).rounding rounding = (0 < rounding < 1) and rounding or 1 res[id] = int(math.ceil(math.log10(1 / rounding))) return res def _decimal_places(self, cr, uid, ids, name, arg, context=None): res = {} for currency in self.browse(cr, uid, ids, context=context): if currency.rounding > 0 and currency.rounding < 1: res[currency.id] = int(math.ceil(math.log10(1/currency.rounding))) else: res[currency.id] = 0 return res _name = "res.currency" _description = "Currency" _columns = { # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code. 'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"), 'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."), 'rate': fields.function(_get_current_rate, string='Current Rate', digits=(12,6), help='The rate of the currency to the currency of rate 1.'), 'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'), 'rounding': fields.float('Rounding Factor', digits=(12,6)), 'decimal_places': fields.function(_decimal_places, string='Decimal Places', type='integer'), 'active': fields.boolean('Active'), 'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed after or before the amount.") } _defaults = { 'active': 1, 'position' : 'after', 'rounding': 0.01, } _sql_constraints = [ ('unique_name', 'unique (name)', 'The currency code must be unique!'), ] _order = "name" date = fields2.Date(compute='compute_date') @api.one @api.depends('rate_ids.name') def compute_date(self): self.date = self.rate_ids[:1].name def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): if not args: args = [] results = super(res_currency,self)\ .name_search(cr, user, name, args, operator=operator, context=context, limit=limit) if not results: name_match = CURRENCY_DISPLAY_PATTERN.match(name) if name_match: results = super(res_currency,self)\ .name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit) return results def name_get(self, cr, uid, ids, context=None): if not ids: return [] if isinstance(ids, (int, long)): ids = [ids] reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write') return [(x['id'], tools.ustr(x['name'])) for x in reads] def copy(self, cr, uid, id, default=None, 
context=None): if context is None: context = {} if not default: default = {} default.update(name=_("%s (copy)") % (self.browse(cr, uid, id, context=context).name)) return super(res_currency, self).copy( cr, uid, id, default=default, context=context) @api.cr_uid_records def round(self, cr, uid, currency, amount): """Return ``amount`` rounded according to ``currency``'s rounding rules. :param Record currency: currency for which we are rounding :param float amount: the amount to round :return: rounded float With the new API, call it like: ``currency.round(amount)``. """ return float_round(amount, precision_rounding=currency.rounding) @api.cr_uid_records def compare_amounts(self, cr, uid, currency, amount1, amount2): """Compare ``amount1`` and ``amount2`` after rounding them according to the given currency's precision.. An amount is considered lower/greater than another amount if their rounded value is different. This is not the same as having a non-zero difference! For example 1.432 and 1.431 are equal at 2 digits precision, so this method would return 0. However 0.006 and 0.002 are considered different (returns 1) because they respectively round to 0.01 and 0.0, even though 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision. :param Record currency: currency for which we are rounding :param float amount1: first amount to compare :param float amount2: second amount to compare :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than, equal to, or greater than ``amount2``, according to ``currency``'s rounding. With the new API, call it like: ``currency.compare_amounts(amount1, amount2)``. """ return float_compare(amount1, amount2, precision_rounding=currency.rounding) @api.cr_uid_records def is_zero(self, cr, uid, currency, amount): """Returns true if ``amount`` is small enough to be treated as zero according to ``currency``'s rounding rules. Warning: ``is_zero(amount1-amount2)`` is not always equivalent to ``compare_amounts(amount1,amount2) == 0``, as the former will round after computing the difference, while the latter will round before, giving different results for e.g. 0.006 and 0.002 at 2 digits precision. :param Record currency: currency for which we are rounding :param float amount: amount to compare with currency's zero With the new API, call it like: ``currency.is_zero(amount)``. 
""" return float_is_zero(amount, precision_rounding=currency.rounding) def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None): if context is None: context = {} ctx = context.copy() from_currency = self.browse(cr, uid, from_currency.id, context=ctx) to_currency = self.browse(cr, uid, to_currency.id, context=ctx) return to_currency.rate/from_currency.rate def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None): if (to_currency.id == from_currency.id): if round: return self.round(cr, uid, to_currency, from_amount) else: return from_amount else: rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context) if round: return self.round(cr, uid, to_currency, from_amount * rate) else: return from_amount * rate @api.v7 def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount, round=True, context=None): context = context or {} if not from_currency_id: from_currency_id = to_currency_id if not to_currency_id: to_currency_id = from_currency_id xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context) from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1] to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1] return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context) @api.v8 def compute(self, from_amount, to_currency, round=True): """ Convert `from_amount` from currency `self` to `to_currency`. """ assert self, "compute from unknown currency" assert to_currency, "compute to unknown currency" # apply conversion rate if self == to_currency: to_amount = from_amount else: to_amount = from_amount * self._get_conversion_rate(self, to_currency) # apply rounding return to_currency.round(to_amount) if round else to_amount @api.v7 def get_format_currencies_js_function(self, cr, uid, context=None): """ Returns a string that can be used to instanciate a javascript function that formats numbers as currencies. That function expects the number as first parameter and the currency id as second parameter. If the currency id parameter is false or undefined, the company currency is used. 
""" company_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id function = "" for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'decimal_places', 'position'], context=context): symbol = row['symbol'] or row['name'] format_number_str = "openerp.web.format_value(arguments[0], {type: 'float', digits: [69," + str(row['decimal_places']) + "]}, 0.00)" if row['position'] == 'after': return_str = "return " + format_number_str + " + '\\xA0' + " + json.dumps(symbol) + ";" else: return_str = "return " + json.dumps(symbol) + " + '\\xA0' + " + format_number_str + ";" function += "if (arguments[1] === " + str(row['id']) + ") { " + return_str + " }" if (row['id'] == company_currency_id): company_currency_format = return_str function = "if (arguments[1] === false || arguments[1] === undefined) {" + company_currency_format + " }" + function return function class res_currency_rate(osv.osv): _name = "res.currency.rate" _description = "Currency Rate" _columns = { 'name': fields.datetime('Date', required=True, select=True), 'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'), 'currency_id': fields.many2one('res.currency', 'Currency', readonly=True), 'company_id': fields.many2one('res.company', 'Company') } _defaults = { 'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'), } _order = "name desc" def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80): if operator in ['=', '!=']: try: date_format = '%Y-%m-%d' if context.get('lang'): lang_obj = self.pool['res.lang'] lang_ids = lang_obj.search(cr, user, [('code', '=', context['lang'])], context=context) if lang_ids: date_format = lang_obj.browse(cr, user, lang_ids[0], context=context).date_format name = time.strftime('%Y-%m-%d', time.strptime(name, date_format)) except ValueError: try: args.append(('rate', operator, float(name))) except ValueError: return [] name = '' operator = 'ilike' return super(res_currency_rate, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
[ "# -*- coding: utf-8 -*-\n", "# Part of Odoo. See LICENSE file for full copyright and licensing details.\n", "\n", "import re\n", "import time\n", "import math\n", "\n", "from openerp import api, fields as fields2\n", "from openerp import tools\n", "from openerp.osv import fields, osv\n", "from openerp.tools import float_round, float_is_zero, float_compare\n", "import json\n", "\n", "CURRENCY_DISPLAY_PATTERN = re.compile(r'(\\w+)\\s*(?:\\((.*)\\))?')\n", "\n", "class res_currency(osv.osv):\n", "\n", " def _get_current_rate(self, cr, uid, ids, name, arg, context=None):\n", " if context is None:\n", " context = {}\n", " res = {}\n", "\n", " date = context.get('date') or fields2.Datetime.now()\n", " company_id = context.get('company_id') or self.pool['res.users']._get_company(cr, uid, context=context)\n", " for id in ids:\n", " cr.execute(\"\"\"SELECT rate FROM res_currency_rate \n", " WHERE currency_id = %s\n", " AND name <= %s\n", " AND (company_id is null\n", " OR company_id = %s)\n", " ORDER BY company_id, name desc LIMIT 1\"\"\",\n", " (id, date, company_id))\n", " if cr.rowcount:\n", " res[id] = cr.fetchone()[0]\n", " else:\n", " res[id] = 1\n", " return res\n", "\n", " def _decimal_places(self, cr, uid, ids, name, arg, context=None):\n", " res = {}\n", " for id in ids:\n", " rounding = self.browse(cr, uid, id, context=context).rounding\n", " rounding = (0 < rounding < 1) and rounding or 1\n", " res[id] = int(math.ceil(math.log10(1 / rounding)))\n", " return res\n", "\n", " def _decimal_places(self, cr, uid, ids, name, arg, context=None):\n", " res = {}\n", " for currency in self.browse(cr, uid, ids, context=context):\n", " if currency.rounding > 0 and currency.rounding < 1:\n", " res[currency.id] = int(math.ceil(math.log10(1/currency.rounding)))\n", " else:\n", " res[currency.id] = 0\n", " return res\n", "\n", " _name = \"res.currency\"\n", " _description = \"Currency\"\n", " _columns = {\n", " # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.\n", " 'name': fields.char('Currency', size=3, required=True, help=\"Currency Code (ISO 4217)\"),\n", " 'symbol': fields.char('Symbol', size=4, help=\"Currency sign, to be used when printing amounts.\"),\n", " 'rate': fields.function(_get_current_rate, string='Current Rate', digits=(12,6),\n", " help='The rate of the currency to the currency of rate 1.'),\n", " 'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),\n", " 'rounding': fields.float('Rounding Factor', digits=(12,6)),\n", " 'decimal_places': fields.function(_decimal_places, string='Decimal Places', type='integer'),\n", " 'active': fields.boolean('Active'),\n", " 'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help=\"Determines where the currency symbol should be placed after or before the amount.\")\n", " }\n", " _defaults = {\n", " 'active': 1,\n", " 'position' : 'after',\n", " 'rounding': 0.01,\n", " }\n", " _sql_constraints = [\n", " ('unique_name', 'unique (name)', 'The currency code must be unique!'),\n", " ]\n", " _order = \"name\"\n", "\n", " date = fields2.Date(compute='compute_date')\n", "\n", " @api.one\n", " @api.depends('rate_ids.name')\n", " def compute_date(self):\n", " self.date = self.rate_ids[:1].name\n", "\n", " def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):\n", " if not args:\n", " args = []\n", " results = super(res_currency,self)\\\n", " .name_search(cr, user, name, args, operator=operator, 
context=context, limit=limit)\n", " if not results:\n", " name_match = CURRENCY_DISPLAY_PATTERN.match(name)\n", " if name_match:\n", " results = super(res_currency,self)\\\n", " .name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit)\n", " return results\n", "\n", " def name_get(self, cr, uid, ids, context=None):\n", " if not ids:\n", " return []\n", " if isinstance(ids, (int, long)):\n", " ids = [ids]\n", " reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')\n", " return [(x['id'], tools.ustr(x['name'])) for x in reads]\n", "\n", " def copy(self, cr, uid, id, default=None, context=None):\n", " if context is None:\n", " context = {}\n", " if not default:\n", " default = {}\n", " default.update(name=_(\"%s (copy)\")\n", " % (self.browse(cr, uid, id, context=context).name))\n", " return super(res_currency, self).copy(\n", " cr, uid, id, default=default, context=context)\n", "\n", " @api.cr_uid_records\n", " def round(self, cr, uid, currency, amount):\n", " \"\"\"Return ``amount`` rounded according to ``currency``'s\n", " rounding rules.\n", "\n", " :param Record currency: currency for which we are rounding\n", " :param float amount: the amount to round\n", " :return: rounded float\n", "\n", " With the new API, call it like: ``currency.round(amount)``.\n", " \"\"\"\n", " return float_round(amount, precision_rounding=currency.rounding)\n", "\n", " @api.cr_uid_records\n", " def compare_amounts(self, cr, uid, currency, amount1, amount2):\n", " \"\"\"Compare ``amount1`` and ``amount2`` after rounding them according to the\n", " given currency's precision..\n", " An amount is considered lower/greater than another amount if their rounded\n", " value is different. This is not the same as having a non-zero difference!\n", "\n", " For example 1.432 and 1.431 are equal at 2 digits precision,\n", " so this method would return 0.\n", " However 0.006 and 0.002 are considered different (returns 1) because\n", " they respectively round to 0.01 and 0.0, even though\n", " 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.\n", "\n", " :param Record currency: currency for which we are rounding\n", " :param float amount1: first amount to compare\n", " :param float amount2: second amount to compare\n", " :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,\n", " equal to, or greater than ``amount2``, according to\n", " ``currency``'s rounding.\n", "\n", " With the new API, call it like: ``currency.compare_amounts(amount1, amount2)``.\n", " \"\"\"\n", " return float_compare(amount1, amount2, precision_rounding=currency.rounding)\n", "\n", " @api.cr_uid_records\n", " def is_zero(self, cr, uid, currency, amount):\n", " \"\"\"Returns true if ``amount`` is small enough to be treated as\n", " zero according to ``currency``'s rounding rules.\n", "\n", " Warning: ``is_zero(amount1-amount2)`` is not always equivalent to \n", " ``compare_amounts(amount1,amount2) == 0``, as the former will round after\n", " computing the difference, while the latter will round before, giving\n", " different results for e.g. 
0.006 and 0.002 at 2 digits precision.\n", "\n", " :param Record currency: currency for which we are rounding\n", " :param float amount: amount to compare with currency's zero\n", "\n", " With the new API, call it like: ``currency.is_zero(amount)``.\n", " \"\"\"\n", " return float_is_zero(amount, precision_rounding=currency.rounding)\n", "\n", " def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):\n", " if context is None:\n", " context = {}\n", " ctx = context.copy()\n", " from_currency = self.browse(cr, uid, from_currency.id, context=ctx)\n", " to_currency = self.browse(cr, uid, to_currency.id, context=ctx)\n", " return to_currency.rate/from_currency.rate\n", "\n", " def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None):\n", " if (to_currency.id == from_currency.id):\n", " if round:\n", " return self.round(cr, uid, to_currency, from_amount)\n", " else:\n", " return from_amount\n", " else:\n", " rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)\n", " if round:\n", " return self.round(cr, uid, to_currency, from_amount * rate)\n", " else:\n", " return from_amount * rate\n", "\n", " @api.v7\n", " def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount,\n", " round=True, context=None):\n", " context = context or {}\n", " if not from_currency_id:\n", " from_currency_id = to_currency_id\n", " if not to_currency_id:\n", " to_currency_id = from_currency_id\n", " xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context)\n", " from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1]\n", " to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1]\n", " return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context)\n", "\n", " @api.v8\n", " def compute(self, from_amount, to_currency, round=True):\n", " \"\"\" Convert `from_amount` from currency `self` to `to_currency`. 
\"\"\"\n", " assert self, \"compute from unknown currency\"\n", " assert to_currency, \"compute to unknown currency\"\n", " # apply conversion rate\n", " if self == to_currency:\n", " to_amount = from_amount\n", " else:\n", " to_amount = from_amount * self._get_conversion_rate(self, to_currency)\n", " # apply rounding\n", " return to_currency.round(to_amount) if round else to_amount\n", "\n", " @api.v7\n", " def get_format_currencies_js_function(self, cr, uid, context=None):\n", " \"\"\" Returns a string that can be used to instanciate a javascript function that formats numbers as currencies.\n", " That function expects the number as first parameter and the currency id as second parameter.\n", " If the currency id parameter is false or undefined, the company currency is used.\n", " \"\"\"\n", " company_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id\n", " function = \"\"\n", " for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'decimal_places', 'position'], context=context):\n", " symbol = row['symbol'] or row['name']\n", " format_number_str = \"openerp.web.format_value(arguments[0], {type: 'float', digits: [69,\" + str(row['decimal_places']) + \"]}, 0.00)\"\n", " if row['position'] == 'after':\n", " return_str = \"return \" + format_number_str + \" + '\\\\xA0' + \" + json.dumps(symbol) + \";\"\n", " else:\n", " return_str = \"return \" + json.dumps(symbol) + \" + '\\\\xA0' + \" + format_number_str + \";\"\n", " function += \"if (arguments[1] === \" + str(row['id']) + \") { \" + return_str + \" }\"\n", " if (row['id'] == company_currency_id):\n", " company_currency_format = return_str\n", " function = \"if (arguments[1] === false || arguments[1] === undefined) {\" + company_currency_format + \" }\" + function\n", " return function\n", "\n", "class res_currency_rate(osv.osv):\n", " _name = \"res.currency.rate\"\n", " _description = \"Currency Rate\"\n", "\n", " _columns = {\n", " 'name': fields.datetime('Date', required=True, select=True),\n", " 'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'),\n", " 'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),\n", " 'company_id': fields.many2one('res.company', 'Company')\n", " }\n", " _defaults = {\n", " 'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'),\n", " }\n", " _order = \"name desc\"\n", "\n", " def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):\n", " if operator in ['=', '!=']:\n", " try:\n", " date_format = '%Y-%m-%d'\n", " if context.get('lang'):\n", " lang_obj = self.pool['res.lang']\n", " lang_ids = lang_obj.search(cr, user, [('code', '=', context['lang'])], context=context)\n", " if lang_ids:\n", " date_format = lang_obj.browse(cr, user, lang_ids[0], context=context).date_format\n", " name = time.strftime('%Y-%m-%d', time.strptime(name, date_format))\n", " except ValueError:\n", " try:\n", " args.append(('rate', operator, float(name)))\n", " except ValueError:\n", " return []\n", " name = ''\n", " operator = 'ilike'\n", " return super(res_currency_rate, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0.008928571428571428, 0, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0.010309278350515464, 0.009433962264150943, 0.02247191011235955, 0.0136986301369863, 0.012195121951219513, 0.014705882352941176, 0.009900990099009901, 0, 0.01990049751243781, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0.022727272727272728, 0.010416666666666666, 0, 0, 0, 0.019230769230769232, 0.008403361344537815, 0, 0, 0, 0, 0, 0, 0, 0.020202020202020204, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0.011627906976744186, 0.011764705882352941, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.01282051282051282, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0.008403361344537815, 0.009523809523809525, 0.010638297872340425, 0, 0.008264462809917356, 0, 0.0072992700729927005, 0, 0.006896551724137931, 0, 0.009615384615384616, 0, 0.009615384615384616, 0.010638297872340425, 0, 0, 0.008, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0.008849557522123894, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0.009259259259259259, 0, 0.009433962264150943, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0.007462686567164179 ]
num_lines: 271
avg_score: 0.002504
import sys import time import logging import traceback try: import cPickle as pickle except ImportError: import pickle from django.conf import settings from django.core.mail import mail_admins from django.contrib.auth.models import User from django.contrib.sites.models import Site from lockfile import FileLock, AlreadyLocked, LockTimeout from notification.models import NoticeQueueBatch from notification import models as notification # lock timeout value. how long to wait for the lock to become available. # default behavior is to never wait for the lock to be available. LOCK_WAIT_TIMEOUT = getattr(settings, "NOTIFICATION_LOCK_WAIT_TIMEOUT", -1) def send_all(): lock = FileLock("send_notices") logging.debug("acquiring lock...") try: lock.acquire(LOCK_WAIT_TIMEOUT) except AlreadyLocked: logging.debug("lock already in place. quitting.") return except LockTimeout: logging.debug("waiting for the lock timed out. quitting.") return logging.debug("acquired.") batches, sent = 0, 0 start_time = time.time() try: # nesting the try statement to be Python 2.4 try: for queued_batch in NoticeQueueBatch.objects.all(): notices = pickle.loads(str(queued_batch.pickled_data).decode("base64")) for user, label, extra_context, on_site, sender, from_address in notices: try: user = User.objects.get(pk=user) logging.info("emitting notice %s to %s" % (label, user)) # call this once per user to be atomic and allow for logging to # accurately show how long each takes. notification.send_now([user], label, extra_context, on_site, sender, from_address) except User.DoesNotExist: # Ignore deleted users, just warn about them logging.warning("not emitting notice %s to user %s since it does not exist" % (label, user)) sent += 1 queued_batch.delete() batches += 1 except: # get the exception exc_class, e, t = sys.exc_info() # email people current_site = Site.objects.get_current() subject = "[%s emit_notices] %r" % (current_site.name, e) message = "%s" % ("\n".join(traceback.format_exception(*sys.exc_info())),) mail_admins(subject, message, fail_silently=True) # log it as critical logging.critical("an exception occurred: %r" % e) finally: logging.debug("releasing lock...") lock.release() logging.debug("released.") logging.info("") logging.info("%s batches, %s sent" % (batches, sent,)) logging.info("done in %.2f seconds" % (time.time() - start_time))
[ "import sys\n", "import time\n", "import logging\n", "import traceback\n", "\n", "try:\n", " import cPickle as pickle\n", "except ImportError:\n", " import pickle\n", "\n", "from django.conf import settings\n", "from django.core.mail import mail_admins\n", "from django.contrib.auth.models import User\n", "from django.contrib.sites.models import Site\n", "\n", "from lockfile import FileLock, AlreadyLocked, LockTimeout\n", "\n", "from notification.models import NoticeQueueBatch\n", "from notification import models as notification\n", "\n", "# lock timeout value. how long to wait for the lock to become available.\n", "# default behavior is to never wait for the lock to be available.\n", "LOCK_WAIT_TIMEOUT = getattr(settings, \"NOTIFICATION_LOCK_WAIT_TIMEOUT\", -1)\n", "\n", "\n", "def send_all():\n", " lock = FileLock(\"send_notices\")\n", " \n", " logging.debug(\"acquiring lock...\")\n", " try:\n", " lock.acquire(LOCK_WAIT_TIMEOUT)\n", " except AlreadyLocked:\n", " logging.debug(\"lock already in place. quitting.\")\n", " return\n", " except LockTimeout:\n", " logging.debug(\"waiting for the lock timed out. quitting.\")\n", " return\n", " logging.debug(\"acquired.\")\n", " \n", " batches, sent = 0, 0\n", " start_time = time.time()\n", " \n", " try:\n", " # nesting the try statement to be Python 2.4\n", " try:\n", " for queued_batch in NoticeQueueBatch.objects.all():\n", " notices = pickle.loads(str(queued_batch.pickled_data).decode(\"base64\"))\n", " for user, label, extra_context, on_site, sender, from_address in notices:\n", " try:\n", " user = User.objects.get(pk=user)\n", " logging.info(\"emitting notice %s to %s\" % (label, user))\n", " # call this once per user to be atomic and allow for logging to\n", " # accurately show how long each takes.\n", " notification.send_now([user], label, extra_context, on_site, sender, from_address)\n", " except User.DoesNotExist:\n", " # Ignore deleted users, just warn about them\n", " logging.warning(\"not emitting notice %s to user %s since it does not exist\" % (label, user))\n", " sent += 1\n", " queued_batch.delete()\n", " batches += 1\n", " except:\n", " # get the exception\n", " exc_class, e, t = sys.exc_info()\n", " # email people\n", " current_site = Site.objects.get_current()\n", " subject = \"[%s emit_notices] %r\" % (current_site.name, e)\n", " message = \"%s\" % (\"\\n\".join(traceback.format_exception(*sys.exc_info())),)\n", " mail_admins(subject, message, fail_silently=True)\n", " # log it as critical\n", " logging.critical(\"an exception occurred: %r\" % e)\n", " finally:\n", " logging.debug(\"releasing lock...\")\n", " lock.release()\n", " logging.debug(\"released.\")\n", " \n", " logging.info(\"\")\n", " logging.info(\"%s batches, %s sent\" % (batches, sent,))\n", " logging.info(\"done in %.2f seconds\" % (time.time() - start_time))\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0.011363636363636364, 0.011111111111111112, 0, 0, 0.012345679012345678, 0.011363636363636364, 0, 0.009345794392523364, 0, 0, 0.008547008547008548, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0 ]
num_lines: 78
avg_score: 0.012027
#!/usr/bin/env python from codecs import open from setuptools import find_packages, setup with open('README.rst', 'r', 'utf-8') as f: readme = f.read() setup( name='django-glitter-news', version='0.3.3', description='Django Glitter News for Django', long_description=readme, url='https://github.com/developersociety/django-glitter-news', maintainer='The Developer Society', maintainer_email='studio@dev.ngo', platforms=['any'], packages=find_packages(), include_package_data=True, install_requires=[ 'django-glitter', 'django-taggit>=0.21.3', 'django-admin-sortable>=2.0.0', ], classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], license='BSD', )
[ "#!/usr/bin/env python\n", "from codecs import open\n", "\n", "from setuptools import find_packages, setup\n", "\n", "\n", "with open('README.rst', 'r', 'utf-8') as f:\n", " readme = f.read()\n", "\n", "\n", "setup(\n", " name='django-glitter-news',\n", " version='0.3.3',\n", " description='Django Glitter News for Django',\n", " long_description=readme,\n", " url='https://github.com/developersociety/django-glitter-news',\n", " maintainer='The Developer Society',\n", " maintainer_email='studio@dev.ngo',\n", " platforms=['any'],\n", " packages=find_packages(),\n", " include_package_data=True,\n", " install_requires=[\n", " 'django-glitter',\n", " 'django-taggit>=0.21.3',\n", " 'django-admin-sortable>=2.0.0',\n", " ],\n", " classifiers=[\n", " 'Environment :: Web Environment',\n", " 'Framework :: Django',\n", " 'License :: OSI Approved :: BSD License',\n", " 'Operating System :: OS Independent',\n", " 'Programming Language :: Python',\n", " 'Programming Language :: Python :: 2',\n", " 'Programming Language :: Python :: 2.7',\n", " 'Programming Language :: Python :: 3',\n", " 'Programming Language :: Python :: 3.3',\n", " 'Programming Language :: Python :: 3.4',\n", " 'Programming Language :: Python :: 3.5',\n", " ],\n", " license='BSD',\n", ")\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
num_lines: 41
avg_score: 0
[ "from __future__ import absolute_import, print_function\n", "\n", "import logging\n", "import traceback\n", "\n", "from django.core.urlresolvers import reverse\n", "from django.utils.safestring import mark_safe\n", "\n", "from sentry.models import (\n", " Activity, Event, Group, Organization, Project, Rule, Team,\n", ")\n", "from sentry.utils.samples import load_data\n", "from sentry.utils.email import inline_css\n", "from sentry.utils.http import absolute_uri\n", "from sentry.web.decorators import login_required\n", "from sentry.web.helpers import render_to_response, render_to_string\n", "\n", "\n", "# TODO(dcramer): use https://github.com/disqus/django-mailviews\n", "class MailPreview(object):\n", " def __init__(self, html_template, text_template, context):\n", " self.html_template = html_template\n", " self.text_template = text_template\n", " self.context = context\n", "\n", " def text_body(self):\n", " return render_to_string(self.text_template, self.context)\n", "\n", " def html_body(self):\n", " try:\n", " return inline_css(render_to_string(self.html_template, self.context))\n", " except Exception:\n", " traceback.print_exc()\n", " raise\n", "\n", "\n", "@login_required\n", "def new_event(request):\n", " org = Organization(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " )\n", " team = Team(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " organization=org,\n", " )\n", " project = Project(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " team=team,\n", " organization=org,\n", " )\n", " group = Group(\n", " id=1,\n", " project=project,\n", " message='This is an example event.',\n", " level=logging.ERROR,\n", " )\n", "\n", " event = Event(\n", " id=1,\n", " project=project,\n", " group=group,\n", " message=group.message,\n", " data=load_data('python'),\n", " )\n", "\n", " rule = Rule(label=\"An example rule\")\n", "\n", " interface_list = []\n", " for interface in event.interfaces.itervalues():\n", " body = interface.to_email_html(event)\n", " if not body:\n", " continue\n", " interface_list.append((interface.get_title(), mark_safe(body)))\n", "\n", " preview = MailPreview(\n", " html_template='sentry/emails/error.html',\n", " text_template='sentry/emails/error.html',\n", " context={\n", " 'rule': rule,\n", " 'group': group,\n", " 'event': event,\n", " 'link': 'http://example.com/link',\n", " 'interfaces': interface_list,\n", " 'tags': event.get_tags(),\n", " 'project_label': project.name,\n", " },\n", " )\n", "\n", " return render_to_response('sentry/debug/mail/preview.html', {\n", " 'preview': preview,\n", " })\n", "\n", "\n", "@login_required\n", "def new_note(request):\n", " org = Organization(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " )\n", " team = Team(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " organization=org,\n", " )\n", " project = Project(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " team=team,\n", " organization=org,\n", " )\n", " group = Group(\n", " id=1,\n", " project=project,\n", " message='This is an example event.',\n", " )\n", " event = Event(\n", " id=1,\n", " project=project,\n", " group=group,\n", " message=group.message,\n", " data=load_data('python'),\n", " )\n", " note = Activity(\n", " group=event.group, event=event, project=event.project,\n", " type=Activity.NOTE, user=request.user,\n", " data={'text': 'This is an example note!'},\n", " )\n", "\n", " preview = MailPreview(\n", " html_template='sentry/emails/activity/note.html',\n", " 
text_template='sentry/emails/activity/note.txt',\n", " context={\n", " 'data': note.data,\n", " 'author': note.user,\n", " 'date': note.datetime,\n", " 'group': group,\n", " 'link': group.get_absolute_url(),\n", " },\n", " )\n", "\n", " return render_to_response('sentry/debug/mail/preview.html', {\n", " 'preview': preview,\n", " })\n", "\n", "\n", "@login_required\n", "def request_access(request):\n", " org = Organization(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " )\n", " team = Team(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " organization=org,\n", " )\n", "\n", " preview = MailPreview(\n", " html_template='sentry/emails/request-team-access.html',\n", " text_template='sentry/emails/request-team-access.txt',\n", " context={\n", " 'email': 'foo@example.com',\n", " 'name': 'George Bush',\n", " 'organization': org,\n", " 'team': team,\n", " 'url': absolute_uri(reverse('sentry-organization-members', kwargs={\n", " 'organization_slug': org.slug,\n", " }) + '?ref=access-requests'),\n", " },\n", " )\n", "\n", " return render_to_response('sentry/debug/mail/preview.html', {\n", " 'preview': preview,\n", " })\n", "\n", "\n", "@login_required\n", "def access_approved(request):\n", " org = Organization(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " )\n", " team = Team(\n", " id=1,\n", " slug='example',\n", " name='Example',\n", " organization=org,\n", " )\n", "\n", " preview = MailPreview(\n", " html_template='sentry/emails/access-approved.html',\n", " text_template='sentry/emails/access-approved.txt',\n", " context={\n", " 'email': 'foo@example.com',\n", " 'name': 'George Bush',\n", " 'organization': org,\n", " 'team': team,\n", " },\n", " )\n", "\n", " return render_to_response('sentry/debug/mail/preview.html', {\n", " 'preview': preview,\n", " })\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
215
0.000057
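Each record in this listing pairs a list of source lines with an equally long list of per-line scores, followed by the line count and an average score. The 0.000057 just above, for example, is consistent with a single non-zero score of roughly 0.0122 spread over 215 lines. As a quick consistency check, a minimal Python sketch of that relationship follows; the dict layout and key names are hypothetical, invented for illustration rather than taken from the source.

# Illustrative sketch only: check that a record's average score is the mean of
# its per-line scores. The key names below are made up for this example.
record = {
    "lines": ["import logging\n", "\n", "logger = logging.getLogger(__name__)\n"],
    "line_scores": [0, 0, 0.012],
    "line_count": 3,
    "average": 0.004,
}

# The score list is aligned one-to-one with the source lines.
assert len(record["lines"]) == len(record["line_scores"]) == record["line_count"]

# The stated average is the mean of the per-line scores (averages in the listing
# above, such as 0.000057, look rounded, so a small tolerance would be needed there).
mean = sum(record["line_scores"]) / record["line_count"]
assert abs(mean - record["average"]) < 1e-6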
[ "# -*- coding: utf-8 -*-\n", "from __future__ import unicode_literals\n", "import os\n", "\n", "from pathomx.plugins import ImportPlugin\n", "\n", "from PIL import Image\n", "\n", "from pathomx.qt import *\n", "\n", "import pathomx.ui as ui\n", "import pathomx.utils as utils\n", "\n", "COLORSPACES = {\n", " 'None (use image default)': None,\n", " 'I (1-bit pixels, black and white, stored with one pixel per byte)': 'I',\n", " 'L (8-bit pixels, black and white)': 'L',\n", " 'P (8-bit pixels, mapped to any other mode using a color palette)': 'P',\n", " 'RGB (3x8-bit pixels, true color)': 'RGB',\n", " 'RGBA (4x8-bit pixels, true color with transparency mask)': 'RGBA',\n", " 'CMYK (4x8-bit pixels, color separation)': 'CMYK',\n", " 'YCbCr (3x8-bit pixels, color video format)': 'YCbCr',\n", " 'LAB (3x8-bit pixels, the L*a*b color space)': 'LAB',\n", " 'HSV (3x8-bit pixels, Hue, Saturation, Value color space)': 'HSV',\n", " 'I (32-bit signed integer pixels)': 'I',\n", " 'F (32-bit floating point pixels)': 'F',\n", "}\n", "\n", "\n", "class ImportImageConfigPanel(ui.ConfigPanel):\n", "\n", " def __init__(self, parent, filename=None, *args, **kwargs):\n", " super(ImportImageConfigPanel, self).__init__(parent, *args, **kwargs)\n", "\n", " self.v = parent\n", " self.config = parent.config\n", " gb = QGroupBox('Open file')\n", " grid = QGridLayout()\n", " self.filename = ui.QFileOpenLineEdit(filename_filter=\"\"\"All compatible files \n", "(*.bmp *.dib *.eps *.gif *.im *.jpg *.jpe *.jpeg *.pcx *.pcd *.psd *.png *.pbm *.pgm *.ppm *.spi *.sgi *.tif *.tiff *.xbm *.xpm);;\n", "Bitmap Image File (*.bmp *.dib);;Encapsulated PostScript (*.eps);;\n", "Graphics Interchange Format (*.gif);;IM (LabEye) Format (*.im);;\n", "Joint Photographic Experts Group (*.jpg *.jpe *.jpeg);;\n", "Personal Computer Exchange (*.pcx);;PhotoCD Format (*.pcd);;Photoshop Document (*.psd);;\n", "Portable Network Graphics (*.png);;Portable Bitmap/NetPBM (*.pbm *.pgm *.ppm);;\n", "Truevision TGA (*.tga);;\n", "Tagged Image File Format (*.tif *.tiff);;\n", "Silicon Graphics Image (*.sgi);;\n", "SPIDER Format (*.spi);;\n", "WebP Format (*.webp);;\n", "X Bitmap (*.xbm);;X Pixmap (*.xpm);;All files (*.*)\"\"\",description=\"Open image file\")\n", " grid.addWidget(QLabel('Path'), 0, 0)\n", " grid.addWidget(self.filename, 0, 1)\n", " self.config.add_handler('filename', self.filename)\n", " \n", " self.cb_color = QComboBox()\n", " self.cb_color.addItems(list(COLORSPACES.keys()))\n", " grid.addWidget(QLabel('Mode'), 2, 0)\n", " grid.addWidget(self.cb_color, 2, 1)\n", " self.config.add_handler('colorspace', self.cb_color, COLORSPACES)\n", " \n", " gb.setLayout(grid)\n", "\n", " self.layout.addWidget(gb)\n", "\n", " self.finalise()\n", "\n", "\n", "class ImportImageApp(ui.GenericTool):\n", "\n", " shortname = 'import_image'\n", " autoconfig_name = \"{filename}\"\n", "\n", " def __init__(self, *args, **kwargs):\n", " super(ImportImageApp, self).__init__(*args, **kwargs)\n", "\n", " self.config.set_defaults({\n", " 'filename': None,\n", " 'colorspace': None,\n", " })\n", "\n", " self.addConfigPanel(ImportImageConfigPanel, 'Settings')\n", "\n", " self.data.add_output('output_image') # Add output slot\n", "\n", "\n", "\n", "\n", "class ImportImage(ImportPlugin):\n", "\n", " def __init__(self, *args, **kwargs):\n", " super(ImportImage, self).__init__(*args, **kwargs)\n", " self.register_app_launcher(ImportImageApp)\n", " \n", " self.register_file_handler(ImportImageApp, 'png')\n", " self.register_file_handler(ImportImageApp, 'tif')\n", " 
self.register_file_handler(ImportImageApp, 'tiff')\n", " self.register_file_handler(ImportImageApp, 'jpeg')\n", " self.register_file_handler(ImportImageApp, 'jpg')\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0.007633587786259542, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0 ]
99
0.004334
""" Tests for credit courses on the student dashboard. """ import unittest import datetime import pytz from mock import patch from django.conf import settings from django.core.urlresolvers import reverse from django.test.utils import override_settings from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.models import CourseEnrollmentAttribute from student.tests.factories import UserFactory, CourseEnrollmentFactory from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider, CreditEligibility from openedx.core.djangoapps.credit import api as credit_api TEST_CREDIT_PROVIDER_SECRET_KEY = "931433d583c84ca7ba41784bad3232e6" @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') @override_settings(CREDIT_PROVIDER_SECRET_KEYS={ "hogwarts": TEST_CREDIT_PROVIDER_SECRET_KEY, }) @patch.dict(settings.FEATURES, {"ENABLE_CREDIT_ELIGIBILITY": True}) class CreditCourseDashboardTest(ModuleStoreTestCase): """ Tests for credit courses on the student dashboard. """ USERNAME = "ron" PASSWORD = "mobiliarbus" PROVIDER_ID = "hogwarts" PROVIDER_NAME = "Hogwarts School of Witchcraft and Wizardry" PROVIDER_STATUS_URL = "http://credit.example.com/status" def setUp(self): """Create a course and an enrollment. """ super(CreditCourseDashboardTest, self).setUp() # Create a user and log in self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result, msg="Could not log in") # Create a course and configure it as a credit course self.course = CourseFactory() CreditCourse.objects.create(course_key=self.course.id, enabled=True) # pylint: disable=no-member # Configure a credit provider CreditProvider.objects.create( provider_id=self.PROVIDER_ID, display_name=self.PROVIDER_NAME, provider_status_url=self.PROVIDER_STATUS_URL, enable_integration=True, ) # Configure a single credit requirement (minimum passing grade) credit_api.set_credit_requirements( self.course.id, # pylint: disable=no-member [ { "namespace": "grade", "name": "grade", "display_name": "Final Grade", "criteria": { "min_grade": 0.8 } } ] ) # Enroll the user in the course as "verified" self.enrollment = CourseEnrollmentFactory( user=self.user, course_id=self.course.id, # pylint: disable=no-member mode="verified" ) def test_not_eligible_for_credit(self): # The user is not yet eligible for credit, so no additional information should be displayed on the dashboard. response = self._load_dashboard() self.assertNotContains(response, "credit") def test_eligible_for_credit(self): # Simulate that the user has completed the only requirement in the course # so the user is eligible for credit. self._make_eligible() # The user should have the option to purchase credit response = self._load_dashboard() self.assertContains(response, "credit-eligibility-msg") self.assertContains(response, "purchase-credit-btn") # Move the eligibility deadline so it's within 30 days eligibility = CreditEligibility.objects.get(username=self.USERNAME) eligibility.deadline = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=29) eligibility.save() # The user should still have the option to purchase credit, # but there should also be a message urging the user to purchase soon. 
response = self._load_dashboard() self.assertContains(response, "credit-eligibility-msg") self.assertContains(response, "purchase-credit-btn") self.assertContains(response, "purchase credit for this course expires") def test_purchased_credit(self): # Simulate that the user has purchased credit, but has not # yet initiated a request to the credit provider self._make_eligible() self._purchase_credit() # Expect that the user's status is "pending" response = self._load_dashboard() self.assertContains(response, "credit-request-pending-msg") def test_purchased_credit_and_request_pending(self): # Simulate that the user has purchased credit and initiated a request, # but we haven't yet heard back from the credit provider. self._make_eligible() self._purchase_credit() self._initiate_request() # Expect that the user's status is "pending" response = self._load_dashboard() self.assertContains(response, "credit-request-pending-msg") def test_purchased_credit_and_request_approved(self): # Simulate that the user has purchased credit and initiated a request, # and had that request approved by the credit provider self._make_eligible() self._purchase_credit() request_uuid = self._initiate_request() self._set_request_status(request_uuid, "approved") # Expect that the user's status is "approved" response = self._load_dashboard() self.assertContains(response, "credit-request-approved-msg") def test_purchased_credit_and_request_rejected(self): # Simulate that the user has purchased credit and initiated a request, # and had that request rejected by the credit provider self._make_eligible() self._purchase_credit() request_uuid = self._initiate_request() self._set_request_status(request_uuid, "rejected") # Expect that the user's status is "approved" response = self._load_dashboard() self.assertContains(response, "credit-request-rejected-msg") def test_credit_status_error(self): # Simulate an error condition: the user has a credit enrollment # but no enrollment attribute indicating which provider the user # purchased credit from. self._make_eligible() self._purchase_credit() CourseEnrollmentAttribute.objects.all().delete() # Expect an error message response = self._load_dashboard() self.assertContains(response, "credit-error-msg") def _load_dashboard(self): """Load the student dashboard and return the HttpResponse. """ return self.client.get(reverse("dashboard")) def _make_eligible(self): """Make the user eligible for credit in the course. """ credit_api.set_credit_requirement_status( self.USERNAME, self.course.id, # pylint: disable=no-member "grade", "grade", status="satisfied", reason={ "final_grade": 0.95 } ) def _purchase_credit(self): """Purchase credit from a provider in the course. """ self.enrollment.mode = "credit" self.enrollment.save() # pylint: disable=no-member CourseEnrollmentAttribute.objects.create( enrollment=self.enrollment, namespace="credit", name="provider_id", value=self.PROVIDER_ID, ) def _initiate_request(self): """Initiate a request for credit from a provider. """ request = credit_api.create_credit_request( self.course.id, # pylint: disable=no-member self.PROVIDER_ID, self.USERNAME ) return request["parameters"]["request_uuid"] def _set_request_status(self, uuid, status): """Set the status of a request for credit, simulating the notification from the provider. """ credit_api.update_credit_request_status(uuid, self.PROVIDER_ID, status)
[ "\"\"\"\n", "Tests for credit courses on the student dashboard.\n", "\"\"\"\n", "import unittest\n", "import datetime\n", "\n", "import pytz\n", "from mock import patch\n", "\n", "from django.conf import settings\n", "from django.core.urlresolvers import reverse\n", "from django.test.utils import override_settings\n", "\n", "from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase\n", "from xmodule.modulestore.tests.factories import CourseFactory\n", "from student.models import CourseEnrollmentAttribute\n", "from student.tests.factories import UserFactory, CourseEnrollmentFactory\n", "\n", "from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider, CreditEligibility\n", "from openedx.core.djangoapps.credit import api as credit_api\n", "\n", "\n", "TEST_CREDIT_PROVIDER_SECRET_KEY = \"931433d583c84ca7ba41784bad3232e6\"\n", "\n", "\n", "@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')\n", "@override_settings(CREDIT_PROVIDER_SECRET_KEYS={\n", " \"hogwarts\": TEST_CREDIT_PROVIDER_SECRET_KEY,\n", "})\n", "@patch.dict(settings.FEATURES, {\"ENABLE_CREDIT_ELIGIBILITY\": True})\n", "class CreditCourseDashboardTest(ModuleStoreTestCase):\n", " \"\"\"\n", " Tests for credit courses on the student dashboard.\n", " \"\"\"\n", "\n", " USERNAME = \"ron\"\n", " PASSWORD = \"mobiliarbus\"\n", "\n", " PROVIDER_ID = \"hogwarts\"\n", " PROVIDER_NAME = \"Hogwarts School of Witchcraft and Wizardry\"\n", " PROVIDER_STATUS_URL = \"http://credit.example.com/status\"\n", "\n", " def setUp(self):\n", " \"\"\"Create a course and an enrollment. \"\"\"\n", " super(CreditCourseDashboardTest, self).setUp()\n", "\n", " # Create a user and log in\n", " self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)\n", " result = self.client.login(username=self.USERNAME, password=self.PASSWORD)\n", " self.assertTrue(result, msg=\"Could not log in\")\n", "\n", " # Create a course and configure it as a credit course\n", " self.course = CourseFactory()\n", " CreditCourse.objects.create(course_key=self.course.id, enabled=True) # pylint: disable=no-member\n", "\n", " # Configure a credit provider\n", " CreditProvider.objects.create(\n", " provider_id=self.PROVIDER_ID,\n", " display_name=self.PROVIDER_NAME,\n", " provider_status_url=self.PROVIDER_STATUS_URL,\n", " enable_integration=True,\n", " )\n", "\n", " # Configure a single credit requirement (minimum passing grade)\n", " credit_api.set_credit_requirements(\n", " self.course.id, # pylint: disable=no-member\n", " [\n", " {\n", " \"namespace\": \"grade\",\n", " \"name\": \"grade\",\n", " \"display_name\": \"Final Grade\",\n", " \"criteria\": {\n", " \"min_grade\": 0.8\n", " }\n", " }\n", " ]\n", " )\n", "\n", " # Enroll the user in the course as \"verified\"\n", " self.enrollment = CourseEnrollmentFactory(\n", " user=self.user,\n", " course_id=self.course.id, # pylint: disable=no-member\n", " mode=\"verified\"\n", " )\n", "\n", " def test_not_eligible_for_credit(self):\n", " # The user is not yet eligible for credit, so no additional information should be displayed on the dashboard.\n", " response = self._load_dashboard()\n", " self.assertNotContains(response, \"credit\")\n", "\n", " def test_eligible_for_credit(self):\n", " # Simulate that the user has completed the only requirement in the course\n", " # so the user is eligible for credit.\n", " self._make_eligible()\n", "\n", " # The user should have the option to purchase credit\n", " response = self._load_dashboard()\n", " 
self.assertContains(response, \"credit-eligibility-msg\")\n", " self.assertContains(response, \"purchase-credit-btn\")\n", "\n", " # Move the eligibility deadline so it's within 30 days\n", " eligibility = CreditEligibility.objects.get(username=self.USERNAME)\n", " eligibility.deadline = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=29)\n", " eligibility.save()\n", "\n", " # The user should still have the option to purchase credit,\n", " # but there should also be a message urging the user to purchase soon.\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-eligibility-msg\")\n", " self.assertContains(response, \"purchase-credit-btn\")\n", " self.assertContains(response, \"purchase credit for this course expires\")\n", "\n", " def test_purchased_credit(self):\n", " # Simulate that the user has purchased credit, but has not\n", " # yet initiated a request to the credit provider\n", " self._make_eligible()\n", " self._purchase_credit()\n", "\n", " # Expect that the user's status is \"pending\"\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-request-pending-msg\")\n", "\n", " def test_purchased_credit_and_request_pending(self):\n", " # Simulate that the user has purchased credit and initiated a request,\n", " # but we haven't yet heard back from the credit provider.\n", " self._make_eligible()\n", " self._purchase_credit()\n", " self._initiate_request()\n", "\n", " # Expect that the user's status is \"pending\"\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-request-pending-msg\")\n", "\n", " def test_purchased_credit_and_request_approved(self):\n", " # Simulate that the user has purchased credit and initiated a request,\n", " # and had that request approved by the credit provider\n", " self._make_eligible()\n", " self._purchase_credit()\n", " request_uuid = self._initiate_request()\n", " self._set_request_status(request_uuid, \"approved\")\n", "\n", " # Expect that the user's status is \"approved\"\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-request-approved-msg\")\n", "\n", " def test_purchased_credit_and_request_rejected(self):\n", " # Simulate that the user has purchased credit and initiated a request,\n", " # and had that request rejected by the credit provider\n", " self._make_eligible()\n", " self._purchase_credit()\n", " request_uuid = self._initiate_request()\n", " self._set_request_status(request_uuid, \"rejected\")\n", "\n", " # Expect that the user's status is \"approved\"\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-request-rejected-msg\")\n", "\n", " def test_credit_status_error(self):\n", " # Simulate an error condition: the user has a credit enrollment\n", " # but no enrollment attribute indicating which provider the user\n", " # purchased credit from.\n", " self._make_eligible()\n", " self._purchase_credit()\n", " CourseEnrollmentAttribute.objects.all().delete()\n", "\n", " # Expect an error message\n", " response = self._load_dashboard()\n", " self.assertContains(response, \"credit-error-msg\")\n", "\n", " def _load_dashboard(self):\n", " \"\"\"Load the student dashboard and return the HttpResponse. \"\"\"\n", " return self.client.get(reverse(\"dashboard\"))\n", "\n", " def _make_eligible(self):\n", " \"\"\"Make the user eligible for credit in the course. 
\"\"\"\n", " credit_api.set_credit_requirement_status(\n", " self.USERNAME,\n", " self.course.id, # pylint: disable=no-member\n", " \"grade\", \"grade\",\n", " status=\"satisfied\",\n", " reason={\n", " \"final_grade\": 0.95\n", " }\n", " )\n", "\n", " def _purchase_credit(self):\n", " \"\"\"Purchase credit from a provider in the course. \"\"\"\n", " self.enrollment.mode = \"credit\"\n", " self.enrollment.save() # pylint: disable=no-member\n", "\n", " CourseEnrollmentAttribute.objects.create(\n", " enrollment=self.enrollment,\n", " namespace=\"credit\",\n", " name=\"provider_id\",\n", " value=self.PROVIDER_ID,\n", " )\n", "\n", " def _initiate_request(self):\n", " \"\"\"Initiate a request for credit from a provider. \"\"\"\n", " request = credit_api.create_credit_request(\n", " self.course.id, # pylint: disable=no-member\n", " self.PROVIDER_ID,\n", " self.USERNAME\n", " )\n", " return request[\"parameters\"][\"request_uuid\"]\n", "\n", " def _set_request_status(self, uuid, status):\n", " \"\"\"Set the status of a request for credit, simulating the notification from the provider. \"\"\"\n", " credit_api.update_credit_request_status(uuid, self.PROVIDER_ID, status)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0.012048192771084338, 0, 0, 0, 0, 0.009433962264150943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00847457627118644, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00980392156862745, 0 ]
209
0.00052
[ "import sys\n", "import os.path\n", "from datetime import timedelta\n", "\n", "ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')\n", "sys.path.append(os.path.join(ROOT_DIR, 'bin'))\n", "\n", "import scheduler\n", "from lib.plugins.poll_task import PollTask\n", "\n", "\n", "class TestScheduler(object):\n", "\n", " # BROKER_URL\n", "\n", " def test_scheduler_broker_url_should_be_set(self):\n", " scheduler.BROKER_URL.should.equal('someHost')\n", "\n", " # CELERYBEAT_SCHEDULE\n", "\n", " def test_scheduler_celerybeat_schedule_should_be_set(self):\n", " scheduler.CELERYBEAT_SCHEDULE.should.equal({\n", " 'TukeysFilter': {\n", " 'task': 'lib.app.task_runner',\n", " 'schedule': timedelta(seconds=60),\n", " 'args': (PollTask, {'name': 'TukeysFilter'})\n", " },\n", " 'SeasonalDecomposition': {\n", " 'task': 'lib.app.task_runner',\n", " 'schedule': timedelta(seconds=300),\n", " 'args': (PollTask, {'name': 'SeasonalDecomposition'})\n", " },\n", " 'SeasonalDecompositionEnsemble': {\n", " 'task': 'lib.app.task_runner',\n", " 'schedule': timedelta(seconds=180),\n", " 'args': (PollTask, {'name': 'SeasonalDecompositionEnsemble'})\n", " },\n", " 'FlowDifference': {\n", " 'task': 'lib.app.task_runner',\n", " 'schedule': timedelta(seconds=600),\n", " 'args': (PollTask, {'name': 'FlowDifference'})\n", " }\n", " })\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0.023255813953488372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
43
0.001909
[ "#!/usr/bin/env python\n", "# Copyright 2011 The hop Authors.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "# pylint: disable=C0103\n", "\n", "\"\"\"Setup script for hop.\"\"\"\n", "\n", "import os.path\n", "import sys\n", "\n", "from setuptools import setup\n", "from distutils.command import install_data\n", "\n", "\n", "\n", "class hop_install(install_data.install_data):\n", " \"\"\"Handle installing data for hop.\"\"\"\n", "\n", " def run(self):\n", " \"\"\"Install commands and dotfiles.\"\"\"\n", " install_data.install_data.run(self)\n", "\n", "\n", " bash_options = ('~/.bashrc', '~/.bash_profile')\n", " bashrc_path = None\n", " for bash in bash_options:\n", " expanded = os.path.expanduser(bash)\n", " if os.path.isfile(expanded):\n", " bashrc_path = expanded\n", " break\n", "\n", " prefix = os.path.join(sys.prefix, 'hop')\n", " required_commands = {\n", " '/hop.bash':\"# Initialize the 'hop' script\\n source %s\" % os.path.join(prefix, 'hop.bash'),\n", " 'hop-lua-script':'# Define an entry point for the lua script version of hop\\n'\n", " 'alias hop-lua-script=\"LUA_PATH=%s %s\"' % (os.path.join(prefix, 'json.lua'),\n", " os.path.join(prefix, 'hop.lua'))\n", " }\n", " # First check if the reference to hop.bash is already installed.\n", " with open(bashrc_path, \"r\") as f:\n", " bashrc_content = f.read()\n", " for k in required_commands.keys():\n", " if k in bashrc_content:\n", " del required_commands[k]\n", "\n", " if required_commands:\n", " with open(bashrc_path, \"a\") as f:\n", " for v in required_commands.values():\n", " f.write(v + '\\n')\n", "\n", " print\n", " print \"Done. Now type '. ~/.bashrc'. Then type 'hop'.\"\n", "\n", " return True\n", "\n", "\n", "setup(name='Hop',\n", " version='1.0',\n", " description='Easily jump to your favorite directories',\n", " license='Apache',\n", " author='Greplin',\n", " author_email='robbyw@greplin.com',\n", " url='https://www.github.com/Cue/hop',\n", " packages=['hop'],\n", " data_files=[('hop', ['hop/hop.bash', 'hop/hop.sh', 'hop/hop.lua', 'hop/json.lua'])],\n", " entry_points = {\n", " 'console_scripts': [\n", " 'hop-python-script = hop.hop:main'\n", " ],\n", " },\n", " cmdclass=dict(install_data=hop_install)\n", " )\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0.025, 0, 0.058823529411764705, 0, 0, 0, 0, 0.019230769230769232, 0, 0, 0.023809523809523808, 0.02857142857142857, 0, 0, 0, 0, 0, 0.02040816326530612, 0.023529411764705882, 0.01, 0.010101010101010102, 0, 0, 0, 0.03125, 0.024390243902439025, 0, 0.02857142857142857, 0, 0, 0.025, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0.08695652173913043, 0, 0, 0, 0, 0, 0.14285714285714285 ]
83
0.007554
[ "import uuid\n", "\n", "from datetime import datetime\n", "from sqlalchemy import Column, String, DateTime, ForeignKey\n", "from sqlalchemy.orm import relationship, backref\n", "from sqlalchemy.schema import Index\n", "\n", "from changes.config import db\n", "from changes.constants import Status, Result\n", "from changes.db.utils import model_repr\n", "from changes.db.types.enum import Enum\n", "from changes.db.types.guid import GUID\n", "from changes.db.types.json import JSONEncodedDict\n", "from changes.models.bazeltarget import BazelTarget\n", "\n", "\n", "class FutureJobStep(object):\n", " def __init__(self, label, commands=None, data=None):\n", " self.label = label\n", " self.commands = commands or []\n", " self.data = data or {}\n", "\n", " def as_jobstep(self, jobphase):\n", " return JobStep(\n", " job_id=jobphase.job_id,\n", " phase=jobphase,\n", " phase_id=jobphase.id,\n", " project_id=jobphase.project_id,\n", " label=self.label,\n", " status=Status.queued,\n", " data=self.data,\n", " )\n", "\n", "\n", "class JobStep(db.Model):\n", " \"\"\"\n", " The most granular unit of work; run on a particular node, has a status and\n", " a result.\n", " \"\"\"\n", " __tablename__ = 'jobstep'\n", "\n", " __table_args__ = (\n", " Index('idx_jobstep_status', 'status'),\n", " Index('idx_jobstep_cluster', 'cluster'),\n", " Index('idx_jobstep_project_date', 'project_id', 'date_created'),\n", " )\n", "\n", " id = Column(GUID, primary_key=True, default=uuid.uuid4)\n", " job_id = Column(GUID, ForeignKey('job.id', ondelete=\"CASCADE\"), nullable=False)\n", " phase_id = Column(GUID, ForeignKey('jobphase.id', ondelete=\"CASCADE\"), nullable=False)\n", " project_id = Column(GUID, ForeignKey('project.id', ondelete=\"CASCADE\"), nullable=False)\n", " label = Column(String(128), nullable=False)\n", " status = Column(Enum(Status), nullable=False, default=Status.unknown)\n", " result = Column(Enum(Result), nullable=False, default=Result.unknown)\n", " node_id = Column(GUID, ForeignKey('node.id', ondelete=\"CASCADE\"))\n", " # id of JobStep that replaces this JobStep. Usually None, unless a JobStep\n", " # fails and is retried.\n", " replacement_id = Column(GUID, ForeignKey('jobstep.id', ondelete=\"CASCADE\"), unique=True)\n", " # Used (for non-Jenkins builds) in jobstep_allocate to only allocate jobsteps\n", " # to slaves of a particular cluster. 
For Jenkins builds, this is pure documentation (typically\n", " # set to the Jenkins label), but should be accurate just the same.\n", " cluster = Column(String(128), nullable=True)\n", " date_started = Column(DateTime)\n", " date_finished = Column(DateTime)\n", " date_created = Column(DateTime, default=datetime.utcnow)\n", " # The time of the last external interaction indicating progress.\n", " last_heartbeat = Column(DateTime)\n", " data = Column(JSONEncodedDict)\n", "\n", " job = relationship('Job')\n", " project = relationship('Project')\n", " node = relationship('Node')\n", " phase = relationship('JobPhase', backref=backref('steps', order_by='JobStep.date_started'))\n", " targets = relationship(BazelTarget, backref=backref('step'))\n", "\n", " __repr__ = model_repr('label')\n", "\n", " def __init__(self, **kwargs):\n", " super(JobStep, self).__init__(**kwargs)\n", " if self.id is None:\n", " self.id = uuid.uuid4()\n", " if self.result is None:\n", " self.result = Result.unknown\n", " if self.status is None:\n", " self.status = Status.unknown\n", " if self.date_created is None:\n", " self.date_created = datetime.utcnow()\n", " if self.data is None:\n", " self.data = {}\n", "\n", " @property\n", " def duration(self):\n", " \"\"\"\n", " Return the duration (in milliseconds) that this item was in-progress.\n", " \"\"\"\n", " if self.date_started and self.date_finished:\n", " duration = (self.date_finished - self.date_started).total_seconds() * 1000\n", " else:\n", " duration = None\n", " return duration\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0.01098901098901099, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0.012195121951219513, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0 ]
100
0.000887
[ "import csv\n", "from util.hash_heap import HashHeap, MAX_HEAP\n", "from collections import defaultdict, Counter\n", "\n", "class SaleTerminal:\n", " \"\"\" A Sale Terminal\n", " Calculates the total of items scanned based on a registry\n", " \"\"\"\n", "\n", " def __init__(self):\n", " \"\"\" Public API methods should use other methods of construction, such as from_csv\n", " Registry is a dict of HashHeaps to preserve quantity order\n", " \"\"\"\n", " self.registry = defaultdict(HashHeap)\n", " self.cart = Counter()\n", "\n", " def scan(self, product):\n", " \"\"\" Scan a single item identified by a string\n", " If the scanned item is not present in the registry, a ValueError is raised\n", " Returns count of scanned object after scanning\n", " \"\"\"\n", " if not product in self.registry:\n", " raise ValueError(\"Product not in registry: \"+product)\n", " self.cart[product] += 1\n", " return self.cart[product]\n", "\n", " def scan_bulk(self, bulk):\n", " \"\"\" Scan in order\n", " If any of the products in bulk are not in the registry, none are scanned and an error thrown\n", " Returns a dict of the product count for all objects that were scanned\n", " \"\"\"\n", " scanned = set([])\n", " # throw error if not in registry\n", " [self.scan(product) for product in bulk if product not in self.registry]\n", " # actually scan\n", " [(self.scan(product), scanned.add(product)) for product in bulk]\n", " return dict([(product, self.cart[product]) for product in scanned])\n", "\n", " def total(self):\n", " \"\"\" Calculates the total of the scanned items\n", " Calculation is done when this method is called\n", " \"\"\"\n", " total = 0.0\n", " for product, count in self.xcart_product_counts():\n", " for quantity, price in self.xregistry_quantity_prices(product):\n", " total += (count / quantity) * price\n", " count = count % quantity\n", " if count != 0:\n", " raise ValueError(\"Product does not include a 1 quantity in registry: {0}\".format(product))\n", " return total\n", "\n", " def xregistry_quantity_prices(self, product):\n", " \"\"\" Generator to get (quantity,price) ordered by quantity in descending order\n", " \"\"\"\n", " return self.registry[product].xpeek(len(self.registry[product]))\n", "\n", " def xcart_product_counts(self):\n", " \"\"\" Generator to get all of the (product, count) in the cart\n", " \"\"\"\n", " return ((product, count) for product, count in self.cart.most_common())\n", "\n", " def register_product(self, product, quantity, price):\n", " \"\"\" Register a product in the form of product string, quantity and price\n", " The product string is matched during the scan method\n", " This method can accept multiple entries for the same product to simulate bulk pricing by varying quantities\n", " \"\"\"\n", " if quantity <= 0:\n", " raise ValueError(\"Cannot add product {0} with a negative or zero quantity: {1}\".format(product, quantity))\n", " if price < 0:\n", " raise ValueError(\"Cannot add product {0} with a negative price: {1}\".format(product, price))\n", " self.registry[product].push(int(quantity), float(price))\n", "\n", "#TODO make a model out of the file instead of this\n", "#TODO check that it include a 1 quantity value\n", "def from_csv(csv_path):\n", " \"\"\" Registers products in a batch by a CSV\n", " CSV is used to identify product, quantity and price per row\n", " \"\"\"\n", " terminal = SaleTerminal()\n", " with open(csv_path) as csv_file:\n", " reader = csv.DictReader(csv_file)\n", " for row in reader:\n", " terminal.register_product(row['product'], 
row['quantity'], row['price'])\n", " return terminal\n" ]
[ 0, 0, 0, 0, 0.05, 0.045454545454545456, 0, 0, 0, 0.045454545454545456, 0.011627906976744186, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0.012048192771084338, 0, 0, 0.02702702702702703, 0.016666666666666666, 0, 0, 0, 0.034482758620689655, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0, 0, 0.014285714285714285, 0, 0, 0.047619047619047616, 0.010101010101010102, 0, 0, 0.020833333333333332, 0.012195121951219513, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0.017857142857142856, 0, 0, 0.008620689655172414, 0, 0, 0.017699115044247787, 0, 0.020202020202020204, 0, 0, 0.0196078431372549, 0.02127659574468085, 0.041666666666666664, 0.022222222222222223, 0, 0, 0.03571428571428571, 0.02857142857142857, 0, 0, 0.012658227848101266, 0.05555555555555555 ]
84
0.009267
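In the records with a non-zero average, only a handful of lines actually carry non-zero scores; the SaleTerminal record just above averages 0.009267 over 84 lines, yet most of its entries are 0. The short sketch below pairs lines with their scores to pull out the highest-scoring ones. The variable names are hypothetical, and the three sample lines and scores are copied from that record.

# Illustrative sketch only: rank the lines of one record by per-line score.
# The sample lines and scores are taken from the SaleTerminal record above.
lines = [
    "import csv\n",
    "class SaleTerminal:\n",
    '  """ A Sale Terminal\n',
]
scores = [0, 0.05, 0.045454545454545456]

# Pair each score with its line and sort with the highest score first.
ranked = sorted(zip(scores, lines), reverse=True)
for score, line in ranked:
    if score > 0:
        print("%.3f  %s" % (score, line.rstrip()))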
[ "import pytest\n", "import sys\n", "import os\n", "\n", "import_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../lib/python/')\n", "sys.path.insert(0, os.path.abspath(import_path))\n", "\n", "from mklauncher import Mklauncher, LauncherImportance\n", "\n", "\n", "@pytest.fixture\n", "def context():\n", " import zmq\n", " ctx = zmq.Context()\n", " ctx.linger = 0\n", " return ctx\n", "\n", "\n", "@pytest.fixture\n", "def single_launcher_file(tmpdir):\n", " data = '''\n", "[demo]\n", "name = Demo config\n", "description = My super demo\n", "command = python2 run.py\n", "variant = default\n", "'''\n", " ini = tmpdir.join('launcher.ini')\n", " ini.write(data)\n", " return [str(tmpdir)]\n", "\n", "\n", "@pytest.fixture\n", "def config_dir(tmpdir):\n", " return str(tmpdir.join('config'))\n", "\n", "\n", "def test_reading_single_launcher_file_works(context, single_launcher_file):\n", " launcher = Mklauncher(context, launcher_dirs=single_launcher_file)\n", "\n", " launchers = launcher.container.launcher\n", " assert len(launchers) == 1\n", " assert launchers[0].name == 'Demo config'\n", " assert launchers[0].description == 'My super demo'\n", " assert launchers[0].command == 'python2 run.py'\n", " assert launchers[0].info.variant == 'default'\n", "\n", "\n", "@pytest.fixture\n", "def valid_importance_file(tmpdir):\n", " data = '''\n", "[/foo/bar/baz]\n", "myconfig = 10\n", "anotherconfig = 2\n", "'''\n", " ini = tmpdir.join('importances.ini')\n", " ini.write(data)\n", " return str(ini)\n", "\n", "\n", "def test_reading_launcher_importances_works(valid_importance_file):\n", " importances = LauncherImportance(valid_importance_file)\n", "\n", " importances.load()\n", "\n", " assert importances['/foo/bar/baz:myconfig'] == 10\n", " assert importances['/foo/bar/baz:anotherconfig'] == 2\n", " assert importances['/foo/bar/baz:Myconfig'] == 10\n", " assert importances['/foo/bar/baz:AnotherConfig'] == 2\n", "\n", "\n", "def test_writing_launcher_importances_works(tmpdir):\n", " save_file = tmpdir.join('test/output.ini')\n", " importances = LauncherImportance(str(save_file))\n", "\n", " importances['/my/config/path/:config1'] = 10\n", " importances['/my/config/path/:config2'] = 2\n", " importances['/home/alexander/:another_config'] = 0\n", " importances.save()\n", "\n", " assert os.path.exists(str(save_file))\n", " data = save_file.read()\n", " assert '[/my/config/path/]' in data\n", " assert '[/home/alexander/]' in data\n", " assert 'config1 = 10' in data\n", " assert 'config2 = 2' in data\n", " assert 'another_config = 0' in data\n", "\n", "\n", "def test_rewriting_launcher_importances_works(valid_importance_file):\n", " importances = LauncherImportance(valid_importance_file)\n", "\n", " importances.load()\n", " importances['/foo/bar/baz:myconfig'] = 8\n", " importances.save()\n", "\n", " assert os.path.exists(valid_importance_file)\n", " with open(valid_importance_file) as save_file:\n", " data = save_file.read()\n", " assert '[/foo/bar/baz]' in data\n", " assert 'myconfig = 8' in data\n", "\n", "\n", "def test_regression_paths_with_dot_cause_problems(tmpdir):\n", " save_file = tmpdir.join('test/output.ini')\n", " importances = LauncherImportance(str(save_file))\n", "\n", " importances['./foo/bar/:config1'] = 10\n", " importances['.:config1'] = 2\n", " importances.save()\n", " importances.load()\n", " importances['./foo/bar/:config1'] == 3\n", " importances.save()\n", "\n", " assert os.path.exists(str(save_file))\n", " data = save_file.read()\n", " assert '[./foo/bar/]' in 
data\n", " assert 'config1 = 3' not in data # ConfigParser causes problems with . in the section name\n", "\n", "\n", "def test_reading_launcher_importances_with_non_existing_file_does_not_throw_error(tmpdir):\n", " save_file = tmpdir.join('save.ini')\n", " importances = LauncherImportance(str(save_file))\n", "\n", " importances.load()\n", "\n", "\n", "def test_updating_launcher_importance_works(context, single_launcher_file, config_dir):\n", " launcher = Mklauncher(context, launcher_dirs=single_launcher_file, config_dir=config_dir)\n", "\n", " from machinetalk.protobuf.config_pb2 import Launcher\n", " msg = Launcher()\n", " msg.index = 0\n", " msg.importance = 5\n", "\n", " launcher._update_importance(msg)\n", " launcher._update_launcher_status()\n", "\n", " launchers = launcher.container.launcher\n", " assert len(launchers) == 1\n", " assert launchers[0].importance == 5\n" ]
[ 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0.018518518518518517, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
141
0.000513
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import logging
import unittest
import time
import gc

import grpc
from grpc.experimental import aio
from tests_aio.unit._test_base import AioTestBase
from tests.unit.framework.common import test_constants

_UNARY_UNARY_ABORT = '/test/UnaryUnaryAbort'
_SUPPRESS_ABORT = '/test/SuppressAbort'
_REPLACE_ABORT = '/test/ReplaceAbort'
_ABORT_AFTER_REPLY = '/test/AbortAfterReply'

_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x01\x01\x01'
_NUM_STREAM_RESPONSES = 5

_ABORT_CODE = grpc.StatusCode.RESOURCE_EXHAUSTED
_ABORT_DETAILS = 'Phony error details'


class _GenericHandler(grpc.GenericRpcHandler):

    @staticmethod
    async def _unary_unary_abort(unused_request, context):
        await context.abort(_ABORT_CODE, _ABORT_DETAILS)
        raise RuntimeError('This line should not be executed')

    @staticmethod
    async def _suppress_abort(unused_request, context):
        try:
            await context.abort(_ABORT_CODE, _ABORT_DETAILS)
        except aio.AbortError as e:
            pass
        return _RESPONSE

    @staticmethod
    async def _replace_abort(unused_request, context):
        try:
            await context.abort(_ABORT_CODE, _ABORT_DETAILS)
        except aio.AbortError as e:
            await context.abort(grpc.StatusCode.INVALID_ARGUMENT,
                                'Override abort!')

    @staticmethod
    async def _abort_after_reply(unused_request, context):
        yield _RESPONSE
        await context.abort(_ABORT_CODE, _ABORT_DETAILS)
        raise RuntimeError('This line should not be executed')

    def service(self, handler_details):
        if handler_details.method == _UNARY_UNARY_ABORT:
            return grpc.unary_unary_rpc_method_handler(self._unary_unary_abort)
        if handler_details.method == _SUPPRESS_ABORT:
            return grpc.unary_unary_rpc_method_handler(self._suppress_abort)
        if handler_details.method == _REPLACE_ABORT:
            return grpc.unary_unary_rpc_method_handler(self._replace_abort)
        if handler_details.method == _ABORT_AFTER_REPLY:
            return grpc.unary_stream_rpc_method_handler(self._abort_after_reply)


async def _start_test_server():
    server = aio.server()
    port = server.add_insecure_port('[::]:0')
    server.add_generic_rpc_handlers((_GenericHandler(),))
    await server.start()
    return 'localhost:%d' % port, server


class TestAbort(AioTestBase):

    async def setUp(self):
        address, self._server = await _start_test_server()
        self._channel = aio.insecure_channel(address)

    async def tearDown(self):
        await self._channel.close()
        await self._server.stop(None)

    async def test_unary_unary_abort(self):
        method = self._channel.unary_unary(_UNARY_UNARY_ABORT)
        call = method(_REQUEST)

        self.assertEqual(_ABORT_CODE, await call.code())
        self.assertEqual(_ABORT_DETAILS, await call.details())

        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call

        rpc_error = exception_context.exception
        self.assertEqual(_ABORT_CODE, rpc_error.code())
        self.assertEqual(_ABORT_DETAILS, rpc_error.details())

    async def test_suppress_abort(self):
        method = self._channel.unary_unary(_SUPPRESS_ABORT)
        call = method(_REQUEST)

        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call

        rpc_error = exception_context.exception
        self.assertEqual(_ABORT_CODE, rpc_error.code())
        self.assertEqual(_ABORT_DETAILS, rpc_error.details())

    async def test_replace_abort(self):
        method = self._channel.unary_unary(_REPLACE_ABORT)
        call = method(_REQUEST)

        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call

        rpc_error = exception_context.exception
        self.assertEqual(_ABORT_CODE, rpc_error.code())
        self.assertEqual(_ABORT_DETAILS, rpc_error.details())

    async def test_abort_after_reply(self):
        method = self._channel.unary_stream(_ABORT_AFTER_REPLY)
        call = method(_REQUEST)

        with self.assertRaises(aio.AioRpcError) as exception_context:
            await call.read()
            await call.read()

        rpc_error = exception_context.exception
        self.assertEqual(_ABORT_CODE, rpc_error.code())
        self.assertEqual(_ABORT_DETAILS, rpc_error.details())

        self.assertEqual(_ABORT_CODE, await call.code())
        self.assertEqual(_ABORT_DETAILS, await call.details())


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
[ "# Copyright 2019 The gRPC Authors\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "\n", "import asyncio\n", "import logging\n", "import unittest\n", "import time\n", "import gc\n", "\n", "import grpc\n", "from grpc.experimental import aio\n", "from tests_aio.unit._test_base import AioTestBase\n", "from tests.unit.framework.common import test_constants\n", "\n", "_UNARY_UNARY_ABORT = '/test/UnaryUnaryAbort'\n", "_SUPPRESS_ABORT = '/test/SuppressAbort'\n", "_REPLACE_ABORT = '/test/ReplaceAbort'\n", "_ABORT_AFTER_REPLY = '/test/AbortAfterReply'\n", "\n", "_REQUEST = b'\\x00\\x00\\x00'\n", "_RESPONSE = b'\\x01\\x01\\x01'\n", "_NUM_STREAM_RESPONSES = 5\n", "\n", "_ABORT_CODE = grpc.StatusCode.RESOURCE_EXHAUSTED\n", "_ABORT_DETAILS = 'Phony error details'\n", "\n", "\n", "class _GenericHandler(grpc.GenericRpcHandler):\n", "\n", " @staticmethod\n", " async def _unary_unary_abort(unused_request, context):\n", " await context.abort(_ABORT_CODE, _ABORT_DETAILS)\n", " raise RuntimeError('This line should not be executed')\n", "\n", " @staticmethod\n", " async def _suppress_abort(unused_request, context):\n", " try:\n", " await context.abort(_ABORT_CODE, _ABORT_DETAILS)\n", " except aio.AbortError as e:\n", " pass\n", " return _RESPONSE\n", "\n", " @staticmethod\n", " async def _replace_abort(unused_request, context):\n", " try:\n", " await context.abort(_ABORT_CODE, _ABORT_DETAILS)\n", " except aio.AbortError as e:\n", " await context.abort(grpc.StatusCode.INVALID_ARGUMENT,\n", " 'Override abort!')\n", "\n", " @staticmethod\n", " async def _abort_after_reply(unused_request, context):\n", " yield _RESPONSE\n", " await context.abort(_ABORT_CODE, _ABORT_DETAILS)\n", " raise RuntimeError('This line should not be executed')\n", "\n", " def service(self, handler_details):\n", " if handler_details.method == _UNARY_UNARY_ABORT:\n", " return grpc.unary_unary_rpc_method_handler(self._unary_unary_abort)\n", " if handler_details.method == _SUPPRESS_ABORT:\n", " return grpc.unary_unary_rpc_method_handler(self._suppress_abort)\n", " if handler_details.method == _REPLACE_ABORT:\n", " return grpc.unary_unary_rpc_method_handler(self._replace_abort)\n", " if handler_details.method == _ABORT_AFTER_REPLY:\n", " return grpc.unary_stream_rpc_method_handler(self._abort_after_reply)\n", "\n", "\n", "async def _start_test_server():\n", " server = aio.server()\n", " port = server.add_insecure_port('[::]:0')\n", " server.add_generic_rpc_handlers((_GenericHandler(),))\n", " await server.start()\n", " return 'localhost:%d' % port, server\n", "\n", "\n", "class TestAbort(AioTestBase):\n", "\n", " async def setUp(self):\n", " address, self._server = await _start_test_server()\n", " self._channel = aio.insecure_channel(address)\n", "\n", " async def tearDown(self):\n", " await self._channel.close()\n", " await self._server.stop(None)\n", "\n", " async def test_unary_unary_abort(self):\n", " method = self._channel.unary_unary(_UNARY_UNARY_ABORT)\n", " call = method(_REQUEST)\n", 
"\n", " self.assertEqual(_ABORT_CODE, await call.code())\n", " self.assertEqual(_ABORT_DETAILS, await call.details())\n", "\n", " with self.assertRaises(aio.AioRpcError) as exception_context:\n", " await call\n", "\n", " rpc_error = exception_context.exception\n", " self.assertEqual(_ABORT_CODE, rpc_error.code())\n", " self.assertEqual(_ABORT_DETAILS, rpc_error.details())\n", "\n", " async def test_suppress_abort(self):\n", " method = self._channel.unary_unary(_SUPPRESS_ABORT)\n", " call = method(_REQUEST)\n", "\n", " with self.assertRaises(aio.AioRpcError) as exception_context:\n", " await call\n", "\n", " rpc_error = exception_context.exception\n", " self.assertEqual(_ABORT_CODE, rpc_error.code())\n", " self.assertEqual(_ABORT_DETAILS, rpc_error.details())\n", "\n", " async def test_replace_abort(self):\n", " method = self._channel.unary_unary(_REPLACE_ABORT)\n", " call = method(_REQUEST)\n", "\n", " with self.assertRaises(aio.AioRpcError) as exception_context:\n", " await call\n", "\n", " rpc_error = exception_context.exception\n", " self.assertEqual(_ABORT_CODE, rpc_error.code())\n", " self.assertEqual(_ABORT_DETAILS, rpc_error.details())\n", "\n", " async def test_abort_after_reply(self):\n", " method = self._channel.unary_stream(_ABORT_AFTER_REPLY)\n", " call = method(_REQUEST)\n", "\n", " with self.assertRaises(aio.AioRpcError) as exception_context:\n", " await call.read()\n", " await call.read()\n", "\n", " rpc_error = exception_context.exception\n", " self.assertEqual(_ABORT_CODE, rpc_error.code())\n", " self.assertEqual(_ABORT_DETAILS, rpc_error.details())\n", "\n", " self.assertEqual(_ABORT_CODE, await call.code())\n", " self.assertEqual(_ABORT_DETAILS, await call.details())\n", "\n", "\n", "if __name__ == '__main__':\n", " logging.basicConfig(level=logging.DEBUG)\n", " unittest.main(verbosity=2)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
151
0.000082
#!/usr/bin/python

# python re-implementation of readplotb.f + read10b.f
# NOTA: please use python version >=2.6

import sys
import getopt
import numpy as np
import os
from postPlot import *
from deskdb import *
import sqlite3
from sqltable import SQLTable
from postPlot import Post_Plot

ment=1000
epsilon = 1e-38
zero = 1e-10
ntlint, ntlmax = 4 , 12
iin = -999
iend = -999

class PostProcessing:

    def __init__(self, database):
        self.db=database
        self.Elhc, self.Einj = self.db.execute('SELECT emitn,gamma from six_beta LIMIT 1')[0]

    def checkInjection(self):
        if(np.abs(self.Einj)< epsilon):
            print "ERROR: Injection energy too small"
            sys.exit()

    def readplotb(self):
        dirname=self.db.mk_analysis_dir()
        rectype=[('six_input_id','int'), ('row_num','int'),
                ('seed','int'), ('qx','float'),('qy','float'),
                ('betx','float'),('bety','float'),('sigx1','float'),
                ('sigy1','float'),('deltap','float'),('emitx','float'),
                ('emity','float'),('sigxminnld', 'float'),
                ('sigxavgnld' ,'float') ,('sigxmaxnld', 'float'),
                ('sigyminnld', 'float'),('sigyavgnld' ,'float'),
                ('sigymaxnld', 'float'),('betx2','float'),
                ('bety2','float'),('distp','float'),('dist','float'),
                ('qx_det','float'),('qy_det','float'),('sturns1' ,'int'),
                ('sturns2','int'),('turn_max','int'),('amp1','float'),
                ('amp2','float'),('angle','float'),('smearx','float'),
                ('smeary','float'),('mtime','float')]

        names=','.join(zip(*rectype)[0])
        turnsl, turnse = self.db.env_var['turnsl'], self.db.env_var['turnse']
        tunex, tuney = float(self.db.env_var['tunex']), float(self.db.env_var['tuney'])
        ns1l, ns2l = self.db.env_var['ns1l'], self.db.env_var['ns2l']
        sql='SELECT %s FROM results ORDER BY tunex,tuney,seed,amp1,amp2,angle'%names
        Elhc,Einj=self.db.execute('SELECT emitn,gamma from six_beta LIMIT 1')[0]
        anumber=1


        seeds, angles= self.db.get_seeds(), self.db.get_angles()
        mtime=self.db.execute('SELECT max(mtime) from results')[0][0]
        final=[]
        post_data=[]
        ftot = []
        sql1='SELECT %s FROM results WHERE betx>0 AND bety>0 AND emitx>0 AND emity>0 AND turn_max=%d '%(names,turnsl)
        nPlotSeeds = self.db.env_var["iend"]

        for tunex,tuney in self.db.get_db_tunes():
            sixdesktunes="%s_%s"%(tunex,tuney)
            sql1+=' AND tunex=%s AND tuney=%s '%(tunex,tuney)
            for angle in angles:
                fndot='DAres.%s.%s.%s.%d'%(self.db.LHCDescrip,sixdesktunes,turnse,anumber)
                fndot=os.path.join(dirname,fndot)
                fhdot = open(fndot, 'w')
                nSeed=1
                for seed in seeds:
                    name2 = "DAres.%s.%s.%s"%(self.db.LHCDescrip,sixdesktunes,turnse)
                    name1= '%s%ss%s%s-%s%s.%d'%(self.db.LHCDescrip,seed,sixdesktunes,ns1l, ns2l, turnse,anumber)
                    ich1, ich2, ich3 = 0, 0, 0
                    alost1, alost2 = 0., 0.
                    achaos, achaos1 = 0, 0
                    icount = 1.

                    #------------------------readplot-------------------
                    tl = np.zeros(ntlmax*ntlint+1)
                    al = np.zeros(ntlmax*ntlint+1)
                    ichl =np.zeros(ntlmax*ntlint+1)
                    for i in range(1, ntlmax):
                        for j in range(0,ntlint):
                            tl[(i-1)*ntlint+j] = int(round(10**((i-1)+(j-1)/float(ntlint))))
                    tl[ntlmax*ntlint]=int(round(10**(float(ntlmax))))
                    #------------------------readplot-------------------

                    achaos, achaos1 = 0, 0
                    alost1, alost2 = 0., 0.
                    ilost=0
                    itest=1
                    fac=2.0
                    fac2=0.1
                    fac3=0.01


                    #-------------------- checkInjection-----------------
                    self.checkInjection()
                    #-------------------- checkInjection-----------------

                    sql=sql1+' AND seed=%s '%seed
                    sql+=' AND angle=%s '%angle
                    sql+=' ORDER BY amp1 '
                    inp=np.array(self.db.execute(sql),dtype=rectype)

                    if len(inp)==0:
                        msg="all particle lost for angle = %s and seed = %s"
                        print msg%(angle,seed)
                        continue

                    six_id = inp['six_input_id']
                    row = inp['row_num']
                    qx = inp['qx']
                    qy = inp['qy']
                    betx = inp['betx']
                    bety = inp['bety']
                    dist = inp['dist']
                    distp = inp['distp']
                    sigx1 = inp['sigx1']
                    betx2 = inp['betx2']
                    bety2 = inp['bety2']
                    emitx = inp['emitx']
                    emity = inp['emity']
                    smeary = inp['smeary']
                    smearx = inp['smearx']
                    qx_det = inp['qx_det']
                    qy_det = inp['qy_det']
                    sigy1 = inp['sigy1']
                    deltap = inp['deltap']
                    sturns1 = inp['sturns1']
                    sturns2 = inp['sturns2']
                    turn_max = inp['turn_max']
                    sigxavgnld = inp['sigxavgnld']
                    sigyavgnld = inp['sigyavgnld']
                    sigxmaxnld = inp['sigxmaxnld']
                    sigxminnld = inp['sigxminnld']
                    sigymaxnld = inp['sigymaxnld']
                    sigyminnld = inp['sigyminnld']


                    xidx=(betx>zero) & (emitx>zero)
                    yidx=(bety>zero) & (emity>zero)
                    # xidx, yidx = len(betx), len(bety)
                    sigx1[xidx]=np.sqrt(betx[xidx]*emitx[xidx])
                    sigy1[yidx]=np.sqrt(bety[yidx]*emity[yidx])
                    itest = sum(betx>zero)
                    # itest = len(betx)
                    iel=itest-1
                    rat=0

                    #------------------------read10-------------------
                    #############################################
                    # if sigx1[0]>0:
                    # rat=sigy1[0]**2*betx[0]/(sigx1[0]**2*bety[0])
                    # if sigx1[0]**2*bety[0]<sigy1[0]**2*betx[0]:
                    # rat=2
                    #############################################
                    #------------------------read10-------------------

                    if abs(emitx[0]) < epsilon and abs(sigx1[0])>epsilon and bety > epsilon:
                        rat=sigy1[0]**2*betx[0]/(sigx1[0]**2*bety[0])
                    if abs(emity[0]) > abs(emitx[0]) or rat > 1e-10:
                        rat=0
                        dummy=np.copy(betx)
                        betx=bety
                        bety=dummy
                        dummy=np.copy(betx2)
                        betx2=bety2
                        bety2=dummy
                        dummy=np.copy(sigxminnld)
                        sigxminnld=np.copy(sigyminnld)
                        sigyminnld=dummy
                        dummy=np.copy(sigx1)
                        sigx1=sigy1
                        sigy1=dummy
                        dummy=np.copy(sigxmaxnld)
                        sigxmaxnld=np.copy(sigymaxnld)
                        sigymaxnld=dummy
                        dummy=np.copy(sigxavgnld)
                        sigxavgnld=sigyavgnld
                        sigyavgnld=dummy
                        dummy=np.copy(emitx)
                        emitx=emity
                        emity=dummy

                    #------------------------ratiosEmittances-------------------
                    sigma=np.sqrt(betx[0]*Elhc/Einj)
                    if abs(emity[0])>0 and abs(sigx1[0])>0:
                        if abs(emitx[0])>= epsilon :
                            eex=emitx[0]
                            eey=emity[0]
                        else:
                            eey=sigy1[0]**2/bety[0]
                            eex=sigx1[0]**2/betx[0]
                        rad=np.sqrt(1+eey/eex)/sigma
                    else:
                        rad=1
                    if abs(sigxavgnld[0])>zero and abs(bety[0])>zero and sigma > 0:
                        if abs(emitx[0]) < zero :
                            rad1=np.sqrt(1+(sigyavgnld[0]**2*betx[0])/(sigxavgnld[0]**2*bety[0]))/sigma
                        else:
                            rad1=(sigyavgnld[0]*np.sqrt(betx[0])-sigxavgnld[0]*np.sqrt(bety2[0]))/(sigxavgnld[0]*np.sqrt(bety[0])-sigyavgnld[0]*np.sqrt(betx2[0]))
                            rad1=np.sqrt(1+rad1**2)/sigma
                    else:
                        rad1 = 1
                    #------------------------ratiosEmittances-------------------
                    ############################CHAOTIC BOUNDARIES
                    #------------------------read10-------------------
                    amin, amax = 1/epsilon, zero
                    achaosPlot, achaos1Plot = achaos, achaos1
                    # f30 = open('fort.30.%d.%d' %(nSeed,anumber),'a')
                    #------------------------read10-------------------

                    for i in range(0,iel+1):
                        #------------------------read10-------------------
                        # if i==0:
                        # achaos=rad*sigx1[i] #OJO, NOMES PER READ10B
                        # achaos1 =achaos
                        #------------------------read10-------------------
                        #------------------------readplot-------------------
                        if abs(sigx1[i]) > epsilon and sigx1[i]<amin:
                            amin = sigx1[i]
                        if abs(sigx1[i]) > epsilon and sigx1[i]>amax:
                            amax=sigx1[i]
                        #------------------------readplot-------------------
                        if ich1 == 0 and (distp[i] > fac or distp[i] < 1./fac):
                            ich1 = 1
                            achaos=rad*sigx1[i]
                            iin=i
                        if ich3 == 0 and dist[i] > fac3 :
                            ich3=1
                            iend=i
                            achaos1=rad*sigx1[i]
                        if ich2 == 0 and (sturns1[i]<turn_max[i] or sturns2[i]<turn_max[i]):
                            ich2 = 1
                            alost2 = rad*sigx1[i]
                        #------------------------readplot-------------------
                        for j in range(0, ntlmax*ntlint+1):
                            if (ichl[j] == 0 and int(round(turn_max[i])) >= tl[j]) and ((int(round(sturns1[i])) < tl[j] or int(round(sturns2[i])) < tl[j])):
                                ichl[j] = 1
                                al[j-1] = rad*sigx1[i]
                        #------------------------readplot-------------------
                        #------------------------read10-------------------
                        # if i>0:
                        # achaosPlot, achaos1Plot = achaos, achaos1
                        # f30.write("%s\t%f %f %f %f %f\n"%( name1[:39],rad*sigx1[i],distp[i],achaosPlot,alost2,rad1*sigxavgnld[i]))
                        # f30.close()
                        #------------------------read10-------------------

                    if iin != -999 and iend == -999 : iend=iel
                    if iin != -999 and iend > iin :
                        for i in range(iin,iend+1) :
                            if(abs(rad*sigx1[i])>zero):
                                alost1 += rad1 * sigxavgnld[i]/rad/sigx1[i]
                                if(i!=iend):
                                    icount+=1.
                        alost1 = alost1/icount
                        if alost1 >= 1.1 or alost1 <= 0.9: alost1= -1.*alost1
                    else:
                        alost1 = 1.0

                    al = abs(alost1)*al
                    alost1 = alost1*alost2

                    if amin == 1/epsilon: amin = zero
                    amin=amin*rad
                    amax=amax*rad

                    al[al==0]=amax
                    alost3 = turn_max[1]
                    sturns1[sturns1==zero] = 1
                    sturns2[sturns2==zero] = 1
                    alost3 = min(alost3, min(sturns1),min(sturns2))

                    if(seed<10):
                        name1+=" "
                    if(anumber<10):
                        name1+=" "

                    #------------------------readplot-------------------
                    if achaos==0:
                        f14Flag = 0
                        achaos=amin
                    else:
                        f14Flag = 1
                        # f14 = open('fort.14.%d.%d' %(nSeed,anumber),'w')
                        # f14.write('%s %s\n'%(achaos,alost3/fac))
                        # f14.write('%s %s\n'%(achaos,turn_max[0]*fac))
                        # f14.close()

                    if abs(alost1) < epsilon: alost1=amax
                    if nSeed != (nPlotSeeds +1):
                        for i in range(0, iel+1):
                            post_data.append([six_id[i], row[i], rad*sigx1[i], rad1, alost1,
                                alost2, alost3, achaos, achaos1,
                                amin, amax, f14Flag, sqlite3.Binary(al)])

                            # sql=("UPDATE {0} SET {1}={2}, {3}={4}, {5}={6}, {7}={8}, {9}={10},"+
                            # " {11}={12}, {13}={14}, {15}={16}, {17}={18}, {19}={20}, {21}={22} " +
                            # " WHERE six_input_id = {23} AND row_num = {24}").format(
                            # tbl, "rad", (rad*sigx1[i]), "rad1", rad1, "alost1", alost1,
                            # "alost2", alost2, "alost3", alost3, "achaos", achaos, "achaos1", achaos1,
                            # "amin", amin,"amax", amax, 'f14', f14Flag, "al", '?', six_id[i], row[i])
                            # self.db.conn.cursor().execute(sql, (sqlite3.Binary(al),))
                    #------------------------readplot-------------------

                    fmt=' %-39s %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f\n'
                    fhdot.write(fmt%( name1[:39],achaos,achaos1,alost1,alost2,rad*sigx1[0],rad*sigx1[iel]))
                    final.append([name2, turnsl, tunex, tuney, int(seed),
                        angle,achaos,achaos1,alost1,alost2,
                        rad*sigx1[0],rad*sigx1[iel],mtime])

                    nSeed +=1
                anumber+=1
                fhdot.close()
        cols=SQLTable.cols_from_fields(tables.Da_Post.fields)
        # datab=SQLTable(self.db.conn,'da_post',cols,tables.Da_Post.key,recreate=True)
        datab=SQLTable(self.db.conn,'da_post',cols)
        datab.insertl(final)

        cols1 = SQLTable.cols_from_fields(tables.Six_Post.fields)
        tab1 = SQLTable(self.db.conn,'six_post',cols1,tables.Six_Post.key)
        tab1.insertl(post_data)

        sql="""CREATE VIEW IF NOT EXISTS six_post_results
        AS SELECT * FROM results INNER JOIN six_post
        ON (results.six_input_id=six_post.six_input_id AND results.row_num=six_post.row_num)"""
        self.db.execute(sql)

    def plot(self, name, seed=None, angle=None, tune=None):
        return Post_Plot(self.db, name, seed, angle, tune)
[ "#!/usr/bin/python\n", "\n", "# python re-implementation of readplotb.f + read10b.f\n", "# NOTA: please use python version >=2.6 \n", "\n", "import sys\n", "import getopt\n", "import numpy as np\n", "import os\n", "from postPlot import *\n", "from deskdb import *\n", "import sqlite3\n", "from sqltable import SQLTable\n", "from postPlot import Post_Plot\n", "\n", "ment=1000\n", "epsilon = 1e-38\n", "zero = 1e-10\n", "ntlint, ntlmax = 4 , 12\n", "iin = -999\n", "iend = -999\n", "\n", "class PostProcessing:\n", " \n", " def __init__(self, database):\n", " self.db=database\n", " self.Elhc, self.Einj = self.db.execute('SELECT emitn,gamma from six_beta LIMIT 1')[0]\n", "\n", " def checkInjection(self):\n", " if(np.abs(self.Einj)< epsilon):\n", " print \"ERROR: Injection energy too small\"\n", " sys.exit()\n", "\n", " def readplotb(self):\n", " dirname=self.db.mk_analysis_dir()\n", " rectype=[('six_input_id','int'), ('row_num','int'),\n", " ('seed','int'), ('qx','float'),('qy','float'),\n", " ('betx','float'),('bety','float'),('sigx1','float'),\n", " ('sigy1','float'),('deltap','float'),('emitx','float'),\n", " ('emity','float'),('sigxminnld', 'float'),\n", " ('sigxavgnld' ,'float') ,('sigxmaxnld', 'float'),\n", " ('sigyminnld', 'float'),('sigyavgnld' ,'float'),\n", " ('sigymaxnld', 'float'),('betx2','float'),\n", " ('bety2','float'),('distp','float'),('dist','float'),\n", " ('qx_det','float'),('qy_det','float'),('sturns1' ,'int'),\n", " ('sturns2','int'),('turn_max','int'),('amp1','float'),\n", " ('amp2','float'),('angle','float'),('smearx','float'),\n", " ('smeary','float'),('mtime','float')]\n", "\n", " names=','.join(zip(*rectype)[0])\n", " turnsl, turnse = self.db.env_var['turnsl'], self.db.env_var['turnse']\n", " tunex, tuney = float(self.db.env_var['tunex']), float(self.db.env_var['tuney'])\n", " ns1l, ns2l = self.db.env_var['ns1l'], self.db.env_var['ns2l']\n", " sql='SELECT %s FROM results ORDER BY tunex,tuney,seed,amp1,amp2,angle'%names\n", " Elhc,Einj=self.db.execute('SELECT emitn,gamma from six_beta LIMIT 1')[0]\n", " anumber=1\n", "\n", " \n", " seeds, angles= self.db.get_seeds(), self.db.get_angles()\n", " mtime=self.db.execute('SELECT max(mtime) from results')[0][0]\n", " final=[]\n", " post_data=[]\n", " ftot = []\n", " sql1='SELECT %s FROM results WHERE betx>0 AND bety>0 AND emitx>0 AND emity>0 AND turn_max=%d '%(names,turnsl)\n", " nPlotSeeds = self.db.env_var[\"iend\"]\n", "\n", " for tunex,tuney in self.db.get_db_tunes():\n", " sixdesktunes=\"%s_%s\"%(tunex,tuney)\n", " sql1+=' AND tunex=%s AND tuney=%s '%(tunex,tuney)\n", " for angle in angles: \n", " fndot='DAres.%s.%s.%s.%d'%(self.db.LHCDescrip,sixdesktunes,turnse,anumber)\n", " fndot=os.path.join(dirname,fndot)\n", " fhdot = open(fndot, 'w')\n", " nSeed=1\n", " for seed in seeds:\n", " name2 = \"DAres.%s.%s.%s\"%(self.db.LHCDescrip,sixdesktunes,turnse)\n", " name1= '%s%ss%s%s-%s%s.%d'%(self.db.LHCDescrip,seed,sixdesktunes,ns1l, ns2l, turnse,anumber)\n", " ich1, ich2, ich3 = 0, 0, 0\n", " alost1, alost2 = 0., 0.\n", " achaos, achaos1 = 0, 0\n", " icount = 1.\n", "\n", " #------------------------readplot-------------------\n", " tl = np.zeros(ntlmax*ntlint+1)\n", " al = np.zeros(ntlmax*ntlint+1)\n", " ichl =np.zeros(ntlmax*ntlint+1)\n", " for i in range(1, ntlmax):\n", " for j in range(0,ntlint):\n", " tl[(i-1)*ntlint+j] = int(round(10**((i-1)+(j-1)/float(ntlint))))\n", " tl[ntlmax*ntlint]=int(round(10**(float(ntlmax))))\n", " #------------------------readplot-------------------\n", "\n", " achaos, achaos1 = 0, 0\n", " 
alost1, alost2 = 0., 0.\n", " ilost=0\n", " itest=1\n", " fac=2.0\n", " fac2=0.1\n", " fac3=0.01\n", "\n", "\n", " #-------------------- checkInjection-----------------\n", " self.checkInjection()\n", " #-------------------- checkInjection-----------------\n", "\n", " sql=sql1+' AND seed=%s '%seed\n", " sql+=' AND angle=%s '%angle\n", " sql+=' ORDER BY amp1 '\n", " inp=np.array(self.db.execute(sql),dtype=rectype)\n", " \n", " if len(inp)==0:\n", " msg=\"all particle lost for angle = %s and seed = %s\"\n", " print msg%(angle,seed)\n", " continue\n", "\n", " six_id = inp['six_input_id']\n", " row = inp['row_num']\n", " qx = inp['qx']\n", " qy = inp['qy']\n", " betx = inp['betx']\n", " bety = inp['bety']\n", " dist = inp['dist']\n", " distp = inp['distp']\n", " sigx1 = inp['sigx1']\n", " betx2 = inp['betx2']\n", " bety2 = inp['bety2']\n", " emitx = inp['emitx']\n", " emity = inp['emity']\n", " smeary = inp['smeary']\n", " smearx = inp['smearx']\n", " qx_det = inp['qx_det']\n", " qy_det = inp['qy_det']\n", " sigy1 = inp['sigy1']\n", " deltap = inp['deltap']\n", " sturns1 = inp['sturns1']\n", " sturns2 = inp['sturns2']\n", " turn_max = inp['turn_max']\n", " sigxavgnld = inp['sigxavgnld']\n", " sigyavgnld = inp['sigyavgnld']\n", " sigxmaxnld = inp['sigxmaxnld']\n", " sigxminnld = inp['sigxminnld']\n", " sigymaxnld = inp['sigymaxnld']\n", " sigyminnld = inp['sigyminnld']\n", "\n", "\n", " xidx=(betx>zero) & (emitx>zero)\n", " yidx=(bety>zero) & (emity>zero)\n", " # xidx, yidx = len(betx), len(bety)\n", " sigx1[xidx]=np.sqrt(betx[xidx]*emitx[xidx])\n", " sigy1[yidx]=np.sqrt(bety[yidx]*emity[yidx])\n", " itest = sum(betx>zero)\n", " # itest = len(betx)\n", " iel=itest-1 \n", " rat=0\n", "\n", " #------------------------read10-------------------\n", " #############################################\n", " # if sigx1[0]>0:\n", " # rat=sigy1[0]**2*betx[0]/(sigx1[0]**2*bety[0])\n", " # if sigx1[0]**2*bety[0]<sigy1[0]**2*betx[0]:\n", " # rat=2\n", " #############################################\n", " #------------------------read10-------------------\n", " \n", " if abs(emitx[0]) < epsilon and abs(sigx1[0])>epsilon and bety > epsilon: \n", " rat=sigy1[0]**2*betx[0]/(sigx1[0]**2*bety[0])\n", " if abs(emity[0]) > abs(emitx[0]) or rat > 1e-10:\n", " rat=0\n", " dummy=np.copy(betx)\n", " betx=bety\n", " bety=dummy\n", " dummy=np.copy(betx2)\n", " betx2=bety2\n", " bety2=dummy\n", " dummy=np.copy(sigxminnld)\n", " sigxminnld=np.copy(sigyminnld)\n", " sigyminnld=dummy\n", " dummy=np.copy(sigx1)\n", " sigx1=sigy1\n", " sigy1=dummy\n", " dummy=np.copy(sigxmaxnld)\n", " sigxmaxnld=np.copy(sigymaxnld)\n", " sigymaxnld=dummy\n", " dummy=np.copy(sigxavgnld)\n", " sigxavgnld=sigyavgnld\n", " sigyavgnld=dummy\n", " dummy=np.copy(emitx) \n", " emitx=emity\n", " emity=dummy\n", "\n", " #------------------------ratiosEmittances------------------- \n", " sigma=np.sqrt(betx[0]*Elhc/Einj)\n", " if abs(emity[0])>0 and abs(sigx1[0])>0:\n", " if abs(emitx[0])>= epsilon :\n", " eex=emitx[0]\n", " eey=emity[0]\n", " else:\n", " eey=sigy1[0]**2/bety[0]\n", " eex=sigx1[0]**2/betx[0]\n", " rad=np.sqrt(1+eey/eex)/sigma\n", " else:\n", " rad=1\n", " if abs(sigxavgnld[0])>zero and abs(bety[0])>zero and sigma > 0:\n", " if abs(emitx[0]) < zero :\n", " rad1=np.sqrt(1+(sigyavgnld[0]**2*betx[0])/(sigxavgnld[0]**2*bety[0]))/sigma\n", " else:\n", " rad1=(sigyavgnld[0]*np.sqrt(betx[0])-sigxavgnld[0]*np.sqrt(bety2[0]))/(sigxavgnld[0]*np.sqrt(bety[0])-sigyavgnld[0]*np.sqrt(betx2[0]))\n", " rad1=np.sqrt(1+rad1**2)/sigma\n", " else:\n", " rad1 = 
1\n", " #------------------------ratiosEmittances------------------- \n", " ############################CHAOTIC BOUNDARIES\n", " #------------------------read10-------------------\n", " amin, amax = 1/epsilon, zero\n", " achaosPlot, achaos1Plot = achaos, achaos1\n", " # f30 = open('fort.30.%d.%d' %(nSeed,anumber),'a') \n", " #------------------------read10-------------------\n", "\n", " for i in range(0,iel+1):\n", " #------------------------read10-------------------\n", " # if i==0:\n", " # achaos=rad*sigx1[i] #OJO, NOMES PER READ10B\n", " # achaos1 =achaos\n", " #------------------------read10-------------------\n", " #------------------------readplot-------------------\n", " if abs(sigx1[i]) > epsilon and sigx1[i]<amin:\n", " amin = sigx1[i]\n", " if abs(sigx1[i]) > epsilon and sigx1[i]>amax:\n", " amax=sigx1[i]\n", " #------------------------readplot-------------------\n", " if ich1 == 0 and (distp[i] > fac or distp[i] < 1./fac): \n", " ich1 = 1\n", " achaos=rad*sigx1[i]\n", " iin=i\n", " if ich3 == 0 and dist[i] > fac3 :\n", " ich3=1\n", " iend=i\n", " achaos1=rad*sigx1[i]\n", " if ich2 == 0 and (sturns1[i]<turn_max[i] or sturns2[i]<turn_max[i]):\n", " ich2 = 1\n", " alost2 = rad*sigx1[i]\n", " #------------------------readplot-------------------\n", " for j in range(0, ntlmax*ntlint+1):\n", " if (ichl[j] == 0 and int(round(turn_max[i])) >= tl[j]) and ((int(round(sturns1[i])) < tl[j] or int(round(sturns2[i])) < tl[j])):\n", " ichl[j] = 1\n", " al[j-1] = rad*sigx1[i]\n", " #------------------------readplot-------------------\n", " #------------------------read10------------------- \n", " # if i>0:\n", " # achaosPlot, achaos1Plot = achaos, achaos1\n", " # f30.write(\"%s\\t%f %f %f %f %f\\n\"%( name1[:39],rad*sigx1[i],distp[i],achaosPlot,alost2,rad1*sigxavgnld[i]))\n", " # f30.close()\n", " #------------------------read10-------------------\n", "\n", " if iin != -999 and iend == -999 : iend=iel \n", " if iin != -999 and iend > iin : \n", " for i in range(iin,iend+1) :\n", " if(abs(rad*sigx1[i])>zero):\n", " alost1 += rad1 * sigxavgnld[i]/rad/sigx1[i]\n", " if(i!=iend):\n", " icount+=1.\n", " alost1 = alost1/icount\n", " if alost1 >= 1.1 or alost1 <= 0.9: alost1= -1.*alost1\n", " else:\n", " alost1 = 1.0\n", "\n", " al = abs(alost1)*al\n", " alost1 = alost1*alost2\n", "\n", " if amin == 1/epsilon: amin = zero\n", " amin=amin*rad\n", " amax=amax*rad\n", "\n", " al[al==0]=amax\n", " alost3 = turn_max[1]\n", " sturns1[sturns1==zero] = 1\n", " sturns2[sturns2==zero] = 1 \n", " alost3 = min(alost3, min(sturns1),min(sturns2))\n", "\n", " if(seed<10):\n", " name1+=\" \"\n", " if(anumber<10):\n", " name1+=\" \" \n", "\n", " #------------------------readplot------------------- \n", " if achaos==0:\n", " f14Flag = 0 \n", " achaos=amin\n", " else:\n", " f14Flag = 1\n", " # f14 = open('fort.14.%d.%d' %(nSeed,anumber),'w')\n", " # f14.write('%s %s\\n'%(achaos,alost3/fac))\n", " # f14.write('%s %s\\n'%(achaos,turn_max[0]*fac))\n", " # f14.close()\n", "\n", " if abs(alost1) < epsilon: alost1=amax\n", " if nSeed != (nPlotSeeds +1):\n", " for i in range(0, iel+1):\n", " post_data.append([six_id[i], row[i], rad*sigx1[i], rad1, alost1, \n", " alost2, alost3, achaos, achaos1, \n", " amin, amax, f14Flag, sqlite3.Binary(al)])\n", " \n", " # sql=(\"UPDATE {0} SET {1}={2}, {3}={4}, {5}={6}, {7}={8}, {9}={10},\"+\n", " # \" {11}={12}, {13}={14}, {15}={16}, {17}={18}, {19}={20}, {21}={22} \" +\n", " # \" WHERE six_input_id = {23} AND row_num = {24}\").format(\n", " # tbl, \"rad\", (rad*sigx1[i]), \"rad1\", rad1, 
\"alost1\", alost1, \n", " # \"alost2\", alost2, \"alost3\", alost3, \"achaos\", achaos, \"achaos1\", achaos1, \n", " # \"amin\", amin,\"amax\", amax, 'f14', f14Flag, \"al\", '?', six_id[i], row[i])\n", " # self.db.conn.cursor().execute(sql, (sqlite3.Binary(al),))\n", " #------------------------readplot------------------- \n", "\n", " fmt=' %-39s %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f\\n'\n", " fhdot.write(fmt%( name1[:39],achaos,achaos1,alost1,alost2,rad*sigx1[0],rad*sigx1[iel]))\n", " final.append([name2, turnsl, tunex, tuney, int(seed),\n", " angle,achaos,achaos1,alost1,alost2,\n", " rad*sigx1[0],rad*sigx1[iel],mtime])\n", " \n", " nSeed +=1\n", " anumber+=1\n", " fhdot.close()\n", " cols=SQLTable.cols_from_fields(tables.Da_Post.fields)\n", " # datab=SQLTable(self.db.conn,'da_post',cols,tables.Da_Post.key,recreate=True)\n", " datab=SQLTable(self.db.conn,'da_post',cols)\n", " datab.insertl(final)\n", "\n", " cols1 = SQLTable.cols_from_fields(tables.Six_Post.fields)\n", " tab1 = SQLTable(self.db.conn,'six_post',cols1,tables.Six_Post.key)\n", " tab1.insertl(post_data)\n", "\n", " sql=\"\"\"CREATE VIEW IF NOT EXISTS six_post_results\n", " AS SELECT * FROM results INNER JOIN six_post\n", " ON (results.six_input_id=six_post.six_input_id AND results.row_num=six_post.row_num)\"\"\"\n", " self.db.execute(sql)\n", "\n", " def plot(self, name, seed=None, angle=None, tune=None):\n", " return Post_Plot(self.db, name, seed, angle, tune)\n" ]
[ 0, 0, 0, 0.023255813953488372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.041666666666666664, 0.08333333333333333, 0, 0, 0.045454545454545456, 0.2, 0, 0.04, 0.010638297872340425, 0, 0, 0.025, 0.017241379310344827, 0, 0, 0, 0.023809523809523808, 0.05, 0.0625, 0.07142857142857142, 0.0684931506849315, 0.03333333333333333, 0.05970149253731343, 0.045454545454545456, 0.03333333333333333, 0.07042253521126761, 0.08, 0.06944444444444445, 0.06944444444444445, 0.05454545454545454, 0, 0.024390243902439025, 0, 0.011363636363636364, 0, 0.03529411764705882, 0.037037037037037035, 0.05555555555555555, 0, 0.1111111111111111, 0.03076923076923077, 0.014285714285714285, 0.058823529411764705, 0.047619047619047616, 0, 0.03389830508474576, 0, 0, 0.0196078431372549, 0.06382978723404255, 0.04838709677419355, 0.02564102564102564, 0.06593406593406594, 0.04, 0, 0.041666666666666664, 0, 0.046511627906976744, 0.061946902654867256, 0, 0, 0, 0, 0, 0.017543859649122806, 0, 0, 0.019230769230769232, 0, 0.041666666666666664, 0.021505376344086023, 0.014285714285714285, 0.017543859649122806, 0, 0, 0, 0.03571428571428571, 0.03571428571428571, 0.03571428571428571, 0.034482758620689655, 0.03333333333333333, 0, 0, 0.034482758620689655, 0, 0.017241379310344827, 0, 0.04, 0.041666666666666664, 0.023255813953488372, 0.028985507246376812, 0.047619047619047616, 0.027777777777777776, 0.038461538461538464, 0.0625, 0.029411764705882353, 0, 0, 0.023809523809523808, 0.02702702702702703, 0.02702702702702703, 0, 0, 0, 0.023809523809523808, 0.023809523809523808, 0.023809523809523808, 0.023809523809523808, 0.023809523809523808, 0.023809523809523808, 0, 0, 0, 0, 0.023809523809523808, 0, 0.020833333333333332, 0.020833333333333332, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.057692307692307696, 0, 0.015625, 0.015625, 0.023255813953488372, 0, 0.05555555555555555, 0.038461538461538464, 0, 0.01818181818181818, 0, 0, 0, 0, 0, 0, 0.01818181818181818, 0.047619047619047616, 0.031578947368421054, 0.014285714285714285, 0, 0.03333333333333333, 0.022727272727272728, 0.029411764705882353, 0.02857142857142857, 0.022222222222222223, 0.027777777777777776, 0.027777777777777776, 0.02, 0.01818181818181818, 0.024390243902439025, 0.022222222222222223, 0.027777777777777776, 0.027777777777777776, 0.02, 0.01818181818181818, 0.024390243902439025, 0.02, 0.021739130434782608, 0.024390243902439025, 0.043478260869565216, 0.027777777777777776, 0.027777777777777776, 0, 0.022988505747126436, 0.018867924528301886, 0.03333333333333333, 0.03773584905660377, 0.024390243902439025, 0.024390243902439025, 0, 0.019230769230769232, 0.019230769230769232, 0.018867924528301886, 0, 0.03333333333333333, 0.03571428571428571, 0.02, 0.019230769230769232, 0, 0.012269938650306749, 0.017241379310344827, 0, 0, 0.022988505747126436, 0.0196078431372549, 0.01818181818181818, 0, 0, 0.013513513513513514, 0.01818181818181818, 0, 0.022222222222222223, 0.03636363636363636, 0, 0, 0, 0.03636363636363636, 0.03508771929824561, 0.014285714285714285, 0.020833333333333332, 0.014285714285714285, 0.043478260869565216, 0.017543859649122806, 0.012345679012345678, 0, 0.020833333333333332, 0.029411764705882353, 0.017241379310344827, 0.02857142857142857, 0.02857142857142857, 0.02040816326530612, 0.03225806451612903, 0, 0, 0.017543859649122806, 0, 0.019230769230769232, 0.023809523809523808, 0.018867924528301886, 0.017543859649122806, 0.025974025974025976, 0, 0.014285714285714285, 0.007518796992481203, 0, 0.01818181818181818, 0, 0.06153846153846154, 0.03571428571428571, 0.03773584905660377, 
0.017857142857142856, 0, 0.024390243902439025, 0.023255813953488372, 0, 0.02531645569620253, 0, 0, 0, 0, 0, 0, 0.018518518518518517, 0.029411764705882353, 0.029411764705882353, 0, 0.05714285714285714, 0, 0.02127659574468085, 0.03389830508474576, 0.014705882352941176, 0, 0.030303030303030304, 0.02857142857142857, 0.027777777777777776, 0.05555555555555555, 0, 0.02531645569620253, 0.029411764705882353, 0.02702702702702703, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.02040816326530612, 0, 0.02127659574468085, 0.029411764705882353, 0.013333333333333334, 0.034482758620689655, 0.010101010101010102, 0.009523809523809525, 0.01098901098901099, 0.021052631578947368, 0.01834862385321101, 0.009259259259259259, 0.011363636363636364, 0.02531645569620253, 0, 0.024096385542168676, 0.08333333333333333, 0, 0.07042253521126761, 0.04225352112676056, 0.047619047619047616, 0.03333333333333333, 0.037037037037037035, 0, 0.016129032258064516, 0, 0.057692307692307696, 0, 0, 0, 0.04, 0, 0, 0.017241379310344827, 0, 0.01020408163265306, 0, 0, 0, 0 ]
336
0.019949
from portage.cache import fs_template
from portage.cache import cache_errors
from portage import os
from portage import _encodings
from portage import _unicode_encode
import codecs
import errno
import stat
import sys

if sys.hexversion >= 0x3000000:
	long = int

# store the current key order *here*.
class database(fs_template.FsBased):

	autocommits = True

	# do not screw with this ordering. _eclasses_ needs to be last
	auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
		'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
		'KEYWORDS', 'IUSE', 'UNUSED_00',
		'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')

	def __init__(self, *args, **config):
		super(database,self).__init__(*args, **config)
		self.location = os.path.join(self.location,
			self.label.lstrip(os.path.sep).rstrip(os.path.sep))

		if len(self._known_keys) > len(self.auxdbkey_order) + 2:
			raise Exception("less ordered keys then auxdbkeys")
		if not os.path.exists(self.location):
			self._ensure_dirs()


	def _getitem(self, cpv):
		d = {}
		try:
			myf = codecs.open(_unicode_encode(os.path.join(self.location, cpv),
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace')
			for k,v in zip(self.auxdbkey_order, myf):
				d[k] = v.rstrip("\n")
		except (OSError, IOError) as e:
			if errno.ENOENT == e.errno:
				raise KeyError(cpv)
			raise cache_errors.CacheCorruption(cpv, e)

		try:
			d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
		except OSError as e:
			myf.close()
			raise cache_errors.CacheCorruption(cpv, e)
		myf.close()
		return d


	def _setitem(self, cpv, values):
		s = cpv.rfind("/")
		fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
		try:
			myf = codecs.open(_unicode_encode(fp,
				encoding=_encodings['fs'], errors='strict'),
				mode='w', encoding=_encodings['repo.content'],
				errors='backslashreplace')
		except (OSError, IOError) as e:
			if errno.ENOENT == e.errno:
				try:
					self._ensure_dirs(cpv)
					myf = codecs.open(_unicode_encode(fp,
						encoding=_encodings['fs'], errors='strict'),
						mode='w', encoding=_encodings['repo.content'],
						errors='backslashreplace')
				except (OSError, IOError) as e:
					raise cache_errors.CacheCorruption(cpv, e)
			else:
				raise cache_errors.CacheCorruption(cpv, e)


		for x in self.auxdbkey_order:
			myf.write(values.get(x,"")+"\n")

		myf.close()
		self._ensure_access(fp, mtime=values["_mtime_"])
		#update written.  now we move it.
		new_fp = os.path.join(self.location,cpv)
		try:
			os.rename(fp, new_fp)
		except (OSError, IOError) as e:
			os.remove(fp)
			raise cache_errors.CacheCorruption(cpv, e)


	def _delitem(self, cpv):
		try:
			os.remove(os.path.join(self.location,cpv))
		except OSError as e:
			if errno.ENOENT == e.errno:
				raise KeyError(cpv)
			else:
				raise cache_errors.CacheCorruption(cpv, e)


	def __contains__(self, cpv):
		return os.path.exists(os.path.join(self.location, cpv))


	def __iter__(self):
		"""generator for walking the dir struct"""
		dirs = [self.location]
		len_base = len(self.location)
		while len(dirs):
			for l in os.listdir(dirs[0]):
				if l.endswith(".cpickle"):
					continue
				p = os.path.join(dirs[0],l)
				st = os.lstat(p)
				if stat.S_ISDIR(st.st_mode):
					dirs.append(p)
					continue
				yield p[len_base+1:]
			dirs.pop(0)


	def commit(self):	pass
[ "from portage.cache import fs_template\n", "from portage.cache import cache_errors\n", "from portage import os\n", "from portage import _encodings\n", "from portage import _unicode_encode\n", "import codecs\n", "import errno\n", "import stat\n", "import sys\n", "\n", "if sys.hexversion >= 0x3000000:\n", "\tlong = int\n", "\n", "# store the current key order *here*.\n", "class database(fs_template.FsBased):\n", "\n", "\tautocommits = True\n", "\n", "\t# do not screw with this ordering. _eclasses_ needs to be last\n", "\tauxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',\n", "\t\t'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',\n", "\t\t'KEYWORDS', 'IUSE', 'UNUSED_00',\n", "\t\t'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')\n", "\n", "\tdef __init__(self, *args, **config):\n", "\t\tsuper(database,self).__init__(*args, **config)\n", "\t\tself.location = os.path.join(self.location, \n", "\t\t\tself.label.lstrip(os.path.sep).rstrip(os.path.sep))\n", "\n", "\t\tif len(self._known_keys) > len(self.auxdbkey_order) + 2:\n", "\t\t\traise Exception(\"less ordered keys then auxdbkeys\")\n", "\t\tif not os.path.exists(self.location):\n", "\t\t\tself._ensure_dirs()\n", "\n", "\n", "\tdef _getitem(self, cpv):\n", "\t\td = {}\n", "\t\ttry:\n", "\t\t\tmyf = codecs.open(_unicode_encode(os.path.join(self.location, cpv),\n", "\t\t\t\tencoding=_encodings['fs'], errors='strict'),\n", "\t\t\t\tmode='r', encoding=_encodings['repo.content'],\n", "\t\t\t\terrors='replace')\n", "\t\t\tfor k,v in zip(self.auxdbkey_order, myf):\n", "\t\t\t\td[k] = v.rstrip(\"\\n\")\n", "\t\texcept (OSError, IOError) as e:\n", "\t\t\tif errno.ENOENT == e.errno:\n", "\t\t\t\traise KeyError(cpv)\n", "\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\n", "\t\ttry:\n", "\t\t\td[\"_mtime_\"] = os.fstat(myf.fileno())[stat.ST_MTIME]\n", "\t\texcept OSError as e:\t\n", "\t\t\tmyf.close()\n", "\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\t\tmyf.close()\n", "\t\treturn d\n", "\n", "\n", "\tdef _setitem(self, cpv, values):\n", "\t\ts = cpv.rfind(\"/\")\n", "\t\tfp=os.path.join(self.location,cpv[:s],\".update.%i.%s\" % (os.getpid(), cpv[s+1:]))\n", "\t\ttry:\n", "\t\t\tmyf = codecs.open(_unicode_encode(fp,\n", "\t\t\t\tencoding=_encodings['fs'], errors='strict'),\n", "\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n", "\t\t\t\terrors='backslashreplace')\n", "\t\texcept (OSError, IOError) as e:\n", "\t\t\tif errno.ENOENT == e.errno:\n", "\t\t\t\ttry:\n", "\t\t\t\t\tself._ensure_dirs(cpv)\n", "\t\t\t\t\tmyf = codecs.open(_unicode_encode(fp,\n", "\t\t\t\t\t\tencoding=_encodings['fs'], errors='strict'),\n", "\t\t\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n", "\t\t\t\t\t\terrors='backslashreplace')\n", "\t\t\t\texcept (OSError, IOError) as e:\n", "\t\t\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\t\t\telse:\n", "\t\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\t\t\n", "\n", "\t\tfor x in self.auxdbkey_order:\n", "\t\t\tmyf.write(values.get(x,\"\")+\"\\n\")\n", "\n", "\t\tmyf.close()\n", "\t\tself._ensure_access(fp, mtime=values[\"_mtime_\"])\n", "\t\t#update written. 
now we move it.\n", "\t\tnew_fp = os.path.join(self.location,cpv)\n", "\t\ttry:\n", "\t\t\tos.rename(fp, new_fp)\n", "\t\texcept (OSError, IOError) as e:\n", "\t\t\tos.remove(fp)\n", "\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\n", "\n", "\tdef _delitem(self, cpv):\n", "\t\ttry:\n", "\t\t\tos.remove(os.path.join(self.location,cpv))\n", "\t\texcept OSError as e:\n", "\t\t\tif errno.ENOENT == e.errno:\n", "\t\t\t\traise KeyError(cpv)\n", "\t\t\telse:\n", "\t\t\t\traise cache_errors.CacheCorruption(cpv, e)\n", "\n", "\n", "\tdef __contains__(self, cpv):\n", "\t\treturn os.path.exists(os.path.join(self.location, cpv))\n", "\n", "\n", "\tdef __iter__(self):\n", "\t\t\"\"\"generator for walking the dir struct\"\"\"\n", "\t\tdirs = [self.location]\n", "\t\tlen_base = len(self.location)\n", "\t\twhile len(dirs):\n", "\t\t\tfor l in os.listdir(dirs[0]):\n", "\t\t\t\tif l.endswith(\".cpickle\"):\n", "\t\t\t\t\tcontinue\n", "\t\t\t\tp = os.path.join(dirs[0],l)\n", "\t\t\t\tst = os.lstat(p)\n", "\t\t\t\tif stat.S_ISDIR(st.st_mode):\n", "\t\t\t\t\tdirs.append(p)\n", "\t\t\t\t\tcontinue\n", "\t\t\t\tyield p[len_base+1:]\n", "\t\t\tdirs.pop(0)\n", "\n", "\n", "\tdef commit(self):\tpass\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0.02702702702702703, 0, 0.05, 0, 0.015625, 0.03508771929824561, 0.037037037037037035, 0.05555555555555555, 0.030303030303030304, 0, 0.02631578947368421, 0.04081632653061224, 0.0425531914893617, 0.03636363636363636, 0, 0.01694915254237288, 0.01818181818181818, 0.025, 0.043478260869565216, 0, 0, 0.07692307692307693, 0.1111111111111111, 0.14285714285714285, 0.014084507042253521, 0.04081632653061224, 0.0196078431372549, 0.045454545454545456, 0.044444444444444446, 0.038461538461538464, 0.029411764705882353, 0.03225806451612903, 0.041666666666666664, 0.021739130434782608, 0, 0.14285714285714285, 0.017857142857142856, 0.08333333333333333, 0.06666666666666667, 0.021739130434782608, 0.07142857142857142, 0.09090909090909091, 0, 0, 0.058823529411764705, 0.047619047619047616, 0.05952380952380952, 0.14285714285714285, 0.024390243902439025, 0.04081632653061224, 0.0196078431372549, 0.03225806451612903, 0.029411764705882353, 0.03225806451612903, 0.1111111111111111, 0.03571428571428571, 0.023255813953488372, 0.0392156862745098, 0.018867924528301886, 0.030303030303030304, 0.027777777777777776, 0.020833333333333332, 0.1111111111111111, 0.02127659574468085, 0.6666666666666666, 0, 0.0625, 0.05555555555555555, 0, 0.07142857142857142, 0.0196078431372549, 0.05555555555555555, 0.046511627906976744, 0.14285714285714285, 0.04, 0.029411764705882353, 0.058823529411764705, 0.021739130434782608, 0, 0, 0.07692307692307693, 0.14285714285714285, 0.043478260869565216, 0.043478260869565216, 0.03225806451612903, 0.041666666666666664, 0.1111111111111111, 0.02127659574468085, 0, 0, 0.06666666666666667, 0.017241379310344827, 0, 0, 0.09523809523809523, 0.022222222222222223, 0.04, 0.03125, 0.05263157894736842, 0.06060606060606061, 0.03225806451612903, 0.07142857142857142, 0.0625, 0.047619047619047616, 0.030303030303030304, 0.05, 0.07142857142857142, 0.04, 0.06666666666666667, 0, 0, 0.125 ]
126
0.043104
__all__ = ['eztv','nzbmatrix','nzbs_org','tvbinz','nzbsrus','binreq','womble','newzbin']

import sickbeard

from os import sys

def sortedProviderList():

    initialList = sickbeard.providerList + sickbeard.newznabProviderList
    providerDict = dict(zip([x.getID() for x in initialList], initialList))

    newList = []

    # add all modules in the priority list, in order
    for curModule in sickbeard.PROVIDER_ORDER:
        if curModule in providerDict:
            newList.append(providerDict[curModule])

    # add any modules that are missing from that list
    for curModule in providerDict:
        if providerDict[curModule] not in newList:
            newList.append(providerDict[curModule])

    return newList

def makeProviderList():

    return [x.provider for x in [getProviderModule(y) for y in __all__] if x]

def getNewznabProviderList(data):

    defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')]
    providerList = filter(lambda x: x, [makeNewznabProvider(x) for x in data.split('!!!')])

    providerDict = dict(zip([x.name for x in providerList], providerList))

    for curDefault in defaultList:
        if not curDefault:
            continue

        if curDefault.name not in providerDict:
            curDefault.default = True
            providerList.append(curDefault)
        else:
            providerDict[curDefault.name].default = True
            providerDict[curDefault.name].name = curDefault.name
            providerDict[curDefault.name].url = curDefault.url

    return filter(lambda x: x, providerList)


def makeNewznabProvider(configString):

    if not configString:
        return None

    name, url, key, enabled = configString.split('|')

    newznab = sys.modules['sickbeard.providers.newznab']

    newProvider = newznab.NewznabProvider(name, url)
    newProvider.key = key
    newProvider.enabled = enabled == '1'

    return newProvider

def getDefaultNewznabProviders():
    return 'NZB.su|http://www.nzb.su/||0'


def getProviderModule(name):
    name = name.lower()
    prefix = "sickbeard.providers."
    if name in __all__ and prefix+name in sys.modules:
        return sys.modules[prefix+name]
    else:
        return None

def getProviderClass(id):

    providerMatch = [x for x in sickbeard.providerList+sickbeard.newznabProviderList if x.getID() == id]

    if len(providerMatch) != 1:
        return None
    else:
        return providerMatch[0]
[ "__all__ = ['eztv','nzbmatrix','nzbs_org','tvbinz','nzbsrus','binreq','womble','newzbin']\r\n", "\r\n", "import sickbeard\r\n", "\r\n", "from os import sys\r\n", "\r\n", "def sortedProviderList():\r\n", "\r\n", " initialList = sickbeard.providerList + sickbeard.newznabProviderList\r\n", " providerDict = dict(zip([x.getID() for x in initialList], initialList))\r\n", "\r\n", " newList = []\r\n", "\r\n", " # add all modules in the priority list, in order\r\n", " for curModule in sickbeard.PROVIDER_ORDER:\r\n", " if curModule in providerDict:\r\n", " newList.append(providerDict[curModule])\r\n", "\r\n", " # add any modules that are missing from that list\r\n", " for curModule in providerDict:\r\n", " if providerDict[curModule] not in newList:\r\n", " newList.append(providerDict[curModule])\r\n", "\r\n", " return newList\r\n", "\r\n", "def makeProviderList():\r\n", "\r\n", " return [x.provider for x in [getProviderModule(y) for y in __all__] if x]\r\n", "\r\n", "def getNewznabProviderList(data):\r\n", "\r\n", " defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')]\r\n", " providerList = filter(lambda x: x, [makeNewznabProvider(x) for x in data.split('!!!')])\r\n", "\r\n", " providerDict = dict(zip([x.name for x in providerList], providerList))\r\n", "\r\n", " for curDefault in defaultList:\r\n", " if not curDefault:\r\n", " continue\r\n", "\r\n", " if curDefault.name not in providerDict:\r\n", " curDefault.default = True\r\n", " providerList.append(curDefault)\r\n", " else:\r\n", " providerDict[curDefault.name].default = True\r\n", " providerDict[curDefault.name].name = curDefault.name\r\n", " providerDict[curDefault.name].url = curDefault.url\r\n", "\r\n", " return filter(lambda x: x, providerList)\r\n", "\r\n", "\r\n", "def makeNewznabProvider(configString):\r\n", "\r\n", " if not configString:\r\n", " return None\r\n", "\r\n", " name, url, key, enabled = configString.split('|')\r\n", "\r\n", " newznab = sys.modules['sickbeard.providers.newznab']\r\n", "\r\n", " newProvider = newznab.NewznabProvider(name, url)\r\n", " newProvider.key = key\r\n", " newProvider.enabled = enabled == '1'\r\n", "\r\n", " return newProvider\r\n", "\r\n", "def getDefaultNewznabProviders():\r\n", " return 'NZB.su|http://www.nzb.su/||0'\r\n", "\r\n", "\r\n", "def getProviderModule(name):\r\n", " name = name.lower()\r\n", " prefix = \"sickbeard.providers.\"\r\n", " if name in __all__ and prefix+name in sys.modules:\r\n", " return sys.modules[prefix+name]\r\n", " else:\r\n", " return None\r\n", "\r\n", "def getProviderClass(id):\r\n", "\r\n", " providerMatch = [x for x in sickbeard.providerList+sickbeard.newznabProviderList if x.getID() == id]\r\n", "\r\n", " if len(providerMatch) != 1:\r\n", " return None\r\n", " else:\r\n", " return providerMatch[0]\r\n" ]
[ 0.08888888888888889, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0.02857142857142857, 0, 0.010526315789473684, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02857142857142857, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0.009433962264150943, 0, 0, 0, 0, 0 ]
86
0.003382
# -*- coding: utf-8 -*-
"""
Tabbed container with all plot widgets

Author: Christian Münker
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from PyQt4 import QtGui

from pyfda.plot_widgets import (plot_hf, plot_phi, plot_pz, plot_tau_g, plot_impz,
                                plot_3d)

#------------------------------------------------------------------------------
class PlotWidgets(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self)
# css = """
#QTabBar{
#font-weight:normal;
#}
#
#"""

# self.setStyleSheet(css)
        self.pltHf = plot_hf.PlotHf()
        self.pltPhi = plot_phi.PlotPhi()
        self.pltPZ = plot_pz.PlotPZ()
        self.pltTauG = plot_tau_g.PlotTauG()
        self.pltImpz = plot_impz.PlotImpz()
        self.plt3D = plot_3d.Plot3D()

        self.initUI()


#------------------------------------------------------------------------------
    def initUI(self):
        """ Initialize UI with tabbed subplots """
        tabWidget = QtGui.QTabWidget()
        tabWidget.addTab(self.pltHf, '|H(f)|')
        tabWidget.addTab(self.pltPhi, 'phi(f)')
        tabWidget.addTab(self.pltPZ, 'P/Z')
        tabWidget.addTab(self.pltTauG, 'tau_g')
        tabWidget.addTab(self.pltImpz, 'h[n]')
        tabWidget.addTab(self.plt3D, '3D')

        layVMain = QtGui.QVBoxLayout()
        layVMain.addWidget(tabWidget)
        layVMain.setContentsMargins(1,1,1,1)#(left, top, right, bottom)
#
        self.setLayout(layVMain)


#------------------------------------------------------------------------------
    def updateData(self):
        """ Update and redraw all subplots with new filter DATA"""
        self.pltHf.draw()
        self.pltPhi.draw()
        self.pltPZ.draw()
        self.pltTauG.draw()
        self.pltImpz.draw()
        self.plt3D.draw()

#------------------------------------------------------------------------------
    def updateSpecs(self):
        """ Update and redraw all subplots with new filter SPECS"""
        self.pltHf.draw()
        self.pltPhi.draw()
        self.pltTauG.draw()
        self.pltImpz.draw()

#------------------------------------------------------------------------

def main():
    import sys
    app = QtGui.QApplication(sys.argv)
    form = PlotWidgets()
    form.show()
    app.exec_()

if __name__ == "__main__":
    main()
[ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Tabbed container with all plot widgets\n", "\n", "Author: Christian Münker\n", "\"\"\"\n", "from __future__ import print_function, division, unicode_literals, absolute_import\n", "from PyQt4 import QtGui\n", "\n", "from pyfda.plot_widgets import (plot_hf, plot_phi, plot_pz, plot_tau_g, plot_impz,\n", " plot_3d)\n", "\n", "#------------------------------------------------------------------------------\n", "class PlotWidgets(QtGui.QWidget):\n", " def __init__(self):\n", " QtGui.QWidget.__init__(self)\n", "# css = \"\"\"\n", "#QTabBar{\n", "#font-weight:normal;\n", "#}\n", "#\n", "#\"\"\"\n", "\n", "# self.setStyleSheet(css)\n", " self.pltHf = plot_hf.PlotHf()\n", " self.pltPhi = plot_phi.PlotPhi()\n", " self.pltPZ = plot_pz.PlotPZ()\n", " self.pltTauG = plot_tau_g.PlotTauG()\n", " self.pltImpz = plot_impz.PlotImpz()\n", " self.plt3D = plot_3d.Plot3D()\n", "\n", " self.initUI()\n", "\n", "\n", "#------------------------------------------------------------------------------\n", " def initUI(self):\n", " \"\"\" Initialize UI with tabbed subplots \"\"\"\n", " tabWidget = QtGui.QTabWidget()\n", " tabWidget.addTab(self.pltHf, '|H(f)|')\n", " tabWidget.addTab(self.pltPhi, 'phi(f)')\n", " tabWidget.addTab(self.pltPZ, 'P/Z')\n", " tabWidget.addTab(self.pltTauG, 'tau_g')\n", " tabWidget.addTab(self.pltImpz, 'h[n]')\n", " tabWidget.addTab(self.plt3D, '3D')\n", "\n", " layVMain = QtGui.QVBoxLayout()\n", " layVMain.addWidget(tabWidget)\n", " layVMain.setContentsMargins(1,1,1,1)#(left, top, right, bottom)\n", "#\n", " self.setLayout(layVMain)\n", "\n", "\n", "#------------------------------------------------------------------------------\n", " def updateData(self):\n", " \"\"\" Update and redraw all subplots with new filter DATA\"\"\"\n", " self.pltHf.draw()\n", " self.pltPhi.draw()\n", " self.pltPZ.draw()\n", " self.pltTauG.draw()\n", " self.pltImpz.draw()\n", " self.plt3D.draw()\n", " \n", "#------------------------------------------------------------------------------\n", " def updateSpecs(self):\n", " \"\"\" Update and redraw all subplots with new filter SPECS\"\"\"\n", " self.pltHf.draw()\n", " self.pltPhi.draw()\n", " self.pltTauG.draw()\n", " self.pltImpz.draw()\n", "\n", "#------------------------------------------------------------------------\n", "\n", "def main():\n", " import sys\n", " app = QtGui.QApplication(sys.argv)\n", " form = PlotWidgets()\n", " form.show()\n", " app.exec_()\n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0.012048192771084338, 0.02857142857142857, 0, 0.0125, 0.029411764705882353, 0, 0, 0, 0.1, 0.047619047619047616, 0.3333333333333333, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0125, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06944444444444445, 0, 0, 0, 0, 0.0125, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0.0125, 0, 0, 0, 0, 0, 0, 0, 0.013513513513513514, 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0 ]
81
0.014955
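The PlotWidgets record above is a plain QTabWidget container. The sketch below is not part of the dataset record; it reduces the same pattern to two placeholder labels so it can run without pyfda's plot modules, assuming PyQt4 is installed as in the record.

import sys
from PyQt4 import QtGui

class TabbedContainer(QtGui.QWidget):
    """Minimal stand-in for PlotWidgets: a QTabWidget inside a QVBoxLayout."""
    def __init__(self):
        QtGui.QWidget.__init__(self)
        tabs = QtGui.QTabWidget()
        tabs.addTab(QtGui.QLabel("|H(f)| plot goes here"), '|H(f)|')
        tabs.addTab(QtGui.QLabel("phi(f) plot goes here"), 'phi(f)')
        lay = QtGui.QVBoxLayout()
        lay.addWidget(tabs)
        lay.setContentsMargins(1, 1, 1, 1)
        self.setLayout(lay)

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    form = TabbedContainer()
    form.show()
    app.exec_()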
from djangosolr.documents.options import Options
from djangosolr.documents.manager import ensure_default_manager
from djangosolr import solr

class DocumentBase(type):

    def __new__(cls, name, bases, attrs):
        super_new = super(DocumentBase, cls).__new__
        new_class = super_new(cls, name, bases, {'__module__': attrs.pop('__module__')})

        attr_meta = attrs.pop('Meta', None)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        new_class._add_to_class('_meta', Options(meta))

        if getattr(new_class, '_default_manager', None):
            new_class._default_manager = None
            new_class._base_manager = None

        for obj_name, obj in attrs.items():
            new_class._add_to_class(obj_name, obj)

        new_class._prepare_class()

        return new_class

    def _add_to_class(cls, name, value):
        if hasattr(value, '_contribute_to_class'):
            value._contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)

    def _prepare_class(cls):
        opts = cls._meta
        opts._prepare_class(cls)
        ensure_default_manager(cls)

class Document(object):

    __metaclass__ = DocumentBase

    def __init__(self, **kwargs):
        for field in self._meta.fields:
            if field.name in kwargs:
                setattr(self, field.name, kwargs.pop(field.name))
            else:
                setattr(self, field.name, field.get_default())
        if kwargs:
            raise KeyError(kwargs.keys()[0])

    @classmethod
    def create(cls, om):
        document = cls()
        if isinstance(om, dict):
            for field in cls._meta.fields:
                name = cls._meta.get_solr_field_name(field)
                if om.has_key(name):
                    setattr(document, field.name, field.convert(om[name]))
        else:
            for field in cls._meta.fields:
                if hasattr(om, field.name):
                    setattr(document, field.name, getattr(om, field.name))
        return document

    def save(self):
        return solr.save([self])

    def pre_save(self):
        pass

    def delete(self):
        return solr.delete([self])

    def pre_delete(self):
        pass
[ "from djangosolr.documents.options import Options\r\n", "from djangosolr.documents.manager import ensure_default_manager\r\n", "from djangosolr import solr\r\n", "\r\n", "class DocumentBase(type):\r\n", " \r\n", " def __new__(cls, name, bases, attrs):\r\n", " super_new = super(DocumentBase, cls).__new__\r\n", " new_class = super_new(cls, name, bases, {'__module__': attrs.pop('__module__')})\r\n", " \r\n", " attr_meta = attrs.pop('Meta', None)\r\n", " if not attr_meta:\r\n", " meta = getattr(new_class, 'Meta', None)\r\n", " else:\r\n", " meta = attr_meta\r\n", " new_class._add_to_class('_meta', Options(meta))\r\n", "\r\n", " if getattr(new_class, '_default_manager', None):\r\n", " new_class._default_manager = None\r\n", " new_class._base_manager = None\r\n", "\r\n", " for obj_name, obj in attrs.items():\r\n", " new_class._add_to_class(obj_name, obj)\r\n", "\r\n", " new_class._prepare_class()\r\n", " \r\n", " return new_class\r\n", " \r\n", " def _add_to_class(cls, name, value):\r\n", " if hasattr(value, '_contribute_to_class'):\r\n", " value._contribute_to_class(cls, name)\r\n", " else:\r\n", " setattr(cls, name, value)\r\n", " \r\n", " def _prepare_class(cls):\r\n", " opts = cls._meta\r\n", " opts._prepare_class(cls)\r\n", " ensure_default_manager(cls)\r\n", "\r\n", "class Document(object):\r\n", " \r\n", " __metaclass__ = DocumentBase\r\n", " \r\n", " def __init__(self, **kwargs):\r\n", " for field in self._meta.fields:\r\n", " if field.name in kwargs:\r\n", " setattr(self, field.name, kwargs.pop(field.name))\r\n", " else:\r\n", " setattr(self, field.name, field.get_default())\r\n", " if kwargs:\r\n", " raise KeyError(kwargs.keys()[0])\r\n", " \r\n", " @classmethod\r\n", " def create(cls, om):\r\n", " document = cls()\r\n", " if isinstance(om, dict):\r\n", " for field in cls._meta.fields:\r\n", " name = cls._meta.get_solr_field_name(field)\r\n", " if om.has_key(name):\r\n", " setattr(document, field.name, field.convert(om[name]))\r\n", " else:\r\n", " for field in cls._meta.fields:\r\n", " if hasattr(om, field.name):\r\n", " setattr(document, field.name, getattr(om, field.name))\r\n", " return document\r\n", " \r\n", " def save(self):\r\n", " return solr.save([self])\r\n", "\r\n", " def pre_save(self):\r\n", " pass\r\n", " \r\n", " def delete(self):\r\n", " return solr.delete([self])\r\n", "\r\n", " def pre_delete(self):\r\n", " pass\r\n" ]
[ 0, 0, 0, 0, 0.037037037037037035, 0.16666666666666666, 0, 0, 0.011111111111111112, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0.04, 0.16666666666666666, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0 ]
77
0.0214
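A minimal sketch of the metaclass mechanics DocumentBase uses in the record above: Meta is popped off the attribute dict, wrapped in an options object, and each remaining attribute either installs itself through a _contribute_to_class hook or is attached with setattr. The classes below are simplified stand-ins, not djangosolr's actual API.

class SimpleOptions(object):
    """Stand-in for djangosolr's Options: just remembers the Meta class."""
    def __init__(self, meta):
        self.meta = meta

class SimpleDocumentBase(type):
    def __new__(cls, name, bases, attrs):
        new_class = super(SimpleDocumentBase, cls).__new__(cls, name, bases, {})
        new_class._meta = SimpleOptions(attrs.pop('Meta', None))
        for attr_name, value in attrs.items():
            if hasattr(value, '_contribute_to_class'):
                value._contribute_to_class(new_class, attr_name)  # field-style hook
            else:
                setattr(new_class, attr_name, value)
        return new_class

# Build a class through the metaclass explicitly; this works the same on
# Python 2 and 3 (the record relies on Python 2's __metaclass__ attribute).
Entry = SimpleDocumentBase(
    'Entry', (object,),
    {'Meta': type('Meta', (), {'index': 'entries'}), 'title': 'untitled'})
print(Entry._meta.meta.index, Entry.title)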
import cairo
import ctypes

from auv_python_helpers import load_library

# XXX: this binding is missing from pycairo?
# If cairo ever changes their header, this could break!
FORMAT_RGB16_565 = 4

class Lcd:
    """
    Ctypes wrapper class for Lcd @ lcd.h
    """

    WIDTH = 320
    HEIGHT = 240

    def __init__(self):
        self.lib = load_library("liblcd.so")
        self.obj = self.lib.Lcd_new()

    def __del__(self):
        try:
            self.lib.Lcd_destroy(self.obj)
        except:
            pass

    def init(self):
        self.lib.Lcd_init(self.obj)

    def clear(self):
        self.lib.Lcd_clear(self.obj)

    def write_surface(self, surface):
        assert(type(surface) == cairo.ImageSurface)
        # XXX: Mad hax going on here. Let me explain.
        # We need a pointer to the underlying cairo surface. Since pycairo wraps
        # the cairo C library, and we know that the Pycairo_ImageSurface object's
        # first element is a pointer to the C struct, we can get the address by
        # taking the python object's address and skipping its PyObject_HEAD.
        #
        # In other words, this is wildly dependant on the internal implementation
        # of pycairo and may break in the future.
        self.lib.Lcd_writesurface(self.obj, ctypes.c_char_p.from_address(id(surface) + object.__basicsize__))

    @classmethod
    def new_surface(cls):
        surface = cairo.ImageSurface(FORMAT_RGB16_565, cls.WIDTH, cls.HEIGHT)
        cr = cairo.Context(surface)
        return surface, cr
[ "import cairo\n", "import ctypes\n", "\n", "from auv_python_helpers import load_library\n", "\n", "# XXX: this binding is missing from pycairo?\n", "# If cairo ever changes their header, this could break!\n", "FORMAT_RGB16_565 = 4\n", "\n", "class Lcd:\n", " \"\"\"\n", " Ctypes wrapper class for Lcd @ lcd.h\n", " \"\"\"\n", "\n", " WIDTH = 320\n", " HEIGHT = 240\n", "\n", " def __init__(self):\n", " self.lib = load_library(\"liblcd.so\")\n", " self.obj = self.lib.Lcd_new()\n", "\n", " def __del__(self):\n", " try:\n", " self.lib.Lcd_destroy(self.obj)\n", " except:\n", " pass\n", "\n", " def init(self):\n", " self.lib.Lcd_init(self.obj)\n", "\n", " def clear(self):\n", " self.lib.Lcd_clear(self.obj)\n", "\n", " def write_surface(self, surface):\n", " assert(type(surface) == cairo.ImageSurface)\n", " # XXX: Mad hax going on here. Let me explain.\n", " # We need a pointer to the underlying cairo surface. Since pycairo wraps\n", " # the cairo C library, and we know that the Pycairo_ImageSurface object's\n", " # first element is a pointer to the C struct, we can get the address by\n", " # taking the python object's address and skipping its PyObject_HEAD.\n", " # \n", " # In other words, this is wildly dependant on the internal implementation\n", " # of pycairo and may break in the future.\n", " self.lib.Lcd_writesurface(self.obj, ctypes.c_char_p.from_address(id(surface) + object.__basicsize__))\n", "\n", " @classmethod\n", " def new_surface(cls):\n", " surface = cairo.ImageSurface(FORMAT_RGB16_565, cls.WIDTH, cls.HEIGHT)\n", " cr = cairo.Context(surface)\n", " return surface, cr\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09090909090909091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0.012195121951219513, 0, 0, 0.09090909090909091, 0.012195121951219513, 0, 0.00909090909090909, 0, 0, 0, 0, 0, 0 ]
50
0.005803
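The record above works around a pycairo binding gap by passing cairo's raw RGB16_565 enum value (4) straight to ImageSurface, which is exactly what its new_surface() does. A hedged sketch of just that workaround, assuming a pycairo build that accepts the raw format value:

import cairo

FORMAT_RGB16_565 = 4  # raw cairo enum value; older pycairo releases do not export it

# 320x240 matches the Lcd.WIDTH / Lcd.HEIGHT constants in the record.
surface = cairo.ImageSurface(FORMAT_RGB16_565, 320, 240)
ctx = cairo.Context(surface)
ctx.set_source_rgb(1.0, 1.0, 1.0)
ctx.paint()
# Each pixel is 2 bytes in RGB16_565, so the row stride is width * 2 plus any padding.
print(surface.get_stride())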
#!/usr/bin/python
'''

INFO
----
This module is uses the rules defined patterns.json of the Gitrob project:
https://github.com/michenriksen/gitrob created by Michael Henriksen.
'''
import os, sys, re, json, base64
import log

def isUnique(total_issues, filename, linenum):
    if len(total_issues) == 0:
        return True
    for issue in total_issues:
        if issue["file"] == filename and issue["line"] == linenum:
            print "found duplicate"
            return False
        else:
            return True

def load_gitrob_rules(fname):
    file = open(fname, 'r')
    return json.loads(file.read())

def gitrob_scan(root_path, rules_path):
    global gitrob_issues, gitrob_rules
    gitrob_issues = []
    gitrob_rules = load_gitrob_rules(rules_path) #load all the gitrob rules
    for directory, sub_dirs, files in os.walk(root_path):
        flag = False
        for _file in files:
            flag = False
            gitrob_issue = {}
            _filename, _filext = os.path.splitext(_file)
            file_path = os.path.join(directory, _file)
            for gitrob_rule in gitrob_rules:
                pattern = re.compile(base64.b64decode(gitrob_rule['pattern']), re.IGNORECASE)
                if gitrob_rule['part'] == 'filename' and pattern.search(_file):
                    flag = True
                elif gitrob_rule['part'] == 'extension' and pattern.search(_filext):
                    flag = True
                elif gitrob_rule['part'] == 'path' and pattern.search(file_path):
                    flag = True
                if flag:
                    gitrob_issue['warning_type'] = 'Sensitive Information Disclosure'
                    gitrob_issue['warning_code'] = 'SID'
                    gitrob_issue['message'] = str(gitrob_rule['caption'])
                    gitrob_issue['file'] = re.sub('\/var\/raptor\/(clones|uploads)\/[a-zA-Z0-9]{56}\/', '', file_path.replace(os.getcwd(), '').replace(root_path, '')).lstrip('/')
                    gitrob_issue['line'] = '1'
                    gitrob_issue['link'] = 'https://www.owasp.org/index.php/Top_10_2013-A6-Sensitive_Data_Exposure'
                    gitrob_issue['code'] = 'n/a'
                    gitrob_issue['severity'] = 'High'
                    gitrob_issue['plugin'] = 'gitrob'
                    gitrob_issue['signature'] = str(gitrob_rule['pattern'])
                    gitrob_issue['location'] = 'n/a'
                    gitrob_issue['user_input'] = 'n/a'
                    gitrob_issue['render_path'] = 'n/a'
                    if isUnique(gitrob_issues, gitrob_issue['file'], gitrob_issue['line']):
                        gitrob_issues.append(gitrob_issue)
                    flag = False
    return gitrob_issues
[ "#!/usr/bin/python\n", "'''\n", "\n", "INFO\n", "----\n", "This module is uses the rules defined patterns.json of the Gitrob project:\n", "https://github.com/michenriksen/gitrob created by Michael Henriksen.\n", "'''\n", "import os, sys, re, json, base64\n", "import log\n", "\n", "def isUnique(total_issues, filename, linenum):\n", " if len(total_issues) == 0:\n", " return True\n", " for issue in total_issues:\n", " if issue[\"file\"] == filename and issue[\"line\"] == linenum:\n", " print \"found duplicate\"\n", " return False\n", " else:\n", " return True\n", "\n", "def load_gitrob_rules(fname):\n", " file = open(fname, 'r')\n", " return json.loads(file.read())\n", "\n", "def gitrob_scan(root_path, rules_path):\n", " global gitrob_issues, gitrob_rules\n", " gitrob_issues = []\n", " gitrob_rules = load_gitrob_rules(rules_path) #load all the gitrob rules\n", " for directory, sub_dirs, files in os.walk(root_path):\n", " flag = False\n", " for _file in files:\n", " flag = False\n", " gitrob_issue = {}\n", " _filename, _filext = os.path.splitext(_file)\n", " file_path = os.path.join(directory, _file)\n", " for gitrob_rule in gitrob_rules:\n", " pattern = re.compile(base64.b64decode(gitrob_rule['pattern']), re.IGNORECASE)\n", " if gitrob_rule['part'] == 'filename' and pattern.search(_file):\n", " flag = True\n", " elif gitrob_rule['part'] == 'extension' and pattern.search(_filext):\n", " flag = True\n", " elif gitrob_rule['part'] == 'path' and pattern.search(file_path):\n", " flag = True\n", " if flag:\n", " gitrob_issue['warning_type'] = 'Sensitive Information Disclosure'\n", " gitrob_issue['warning_code'] = 'SID'\n", " gitrob_issue['message'] = str(gitrob_rule['caption'])\n", " gitrob_issue['file'] = re.sub('\\/var\\/raptor\\/(clones|uploads)\\/[a-zA-Z0-9]{56}\\/', '', file_path.replace(os.getcwd(), '').replace(root_path, '')).lstrip('/')\n", " gitrob_issue['line'] = '1'\n", " gitrob_issue['link'] = 'https://www.owasp.org/index.php/Top_10_2013-A6-Sensitive_Data_Exposure'\n", " gitrob_issue['code'] = 'n/a'\n", " gitrob_issue['severity'] = 'High'\n", " gitrob_issue['plugin'] = 'gitrob'\n", " gitrob_issue['signature'] = str(gitrob_rule['pattern'])\n", " gitrob_issue['location'] = 'n/a'\n", " gitrob_issue['user_input'] = 'n/a'\n", " gitrob_issue['render_path'] = 'n/a'\n", " if isUnique(gitrob_issues, gitrob_issue['file'], gitrob_issue['line']):\n", " gitrob_issues.append(gitrob_issue)\n", " flag = False\n", " return gitrob_issues\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0, 0, 0.02127659574468085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0.025, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011764705882352941, 0, 0.012195121951219513, 0, 0, 0.022988505747126436, 0, 0, 0.0335195530726257, 0, 0.008620689655172414, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0 ]
62
0.003981
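Each Gitrob rule carries a base64-encoded regex plus the file "part" it applies to; gitrob_scan in the record above decodes the pattern and tests it against the filename, extension, or full path. The sketch below replays that matching step on one made-up rule (not a real Gitrob signature):

import base64
import os
import re

# Hypothetical rule in the same shape as entries from Gitrob's patterns.json.
rule = {
    "part": "extension",
    "pattern": base64.b64encode(b"pem").decode("ascii"),
    "caption": "Potential cryptographic private key",
}

file_path = "config/deploy/server.pem"
_filename, _filext = os.path.splitext(os.path.basename(file_path))

pattern = re.compile(base64.b64decode(rule["pattern"]).decode("ascii"), re.IGNORECASE)
if rule["part"] == "extension" and pattern.search(_filext):
    print("%s: %s" % (rule["caption"], file_path))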
# (c) Copyright 2015 by James Stout
# Licensed under the LGPL, see <http://www.gnu.org/licenses/>

from dragonfly import (
    Dictation,
    Grammar,
    IntegerRef,
    Key,
    MappingRule,
    Text,
)
import _linux_utils as linux


def Exec(command):
    return Key("a-x") + Text(command) + Key("enter")


class CommandRule(MappingRule):
    mapping = {
        "dragonfly add buffer": Exec("dragonfly-add-buffer"),
        "dragonfly add word": Exec("dragonfly-add-word"),
        "dragonfly blacklist word": Exec("dragonfly-blacklist-word"),
        "Foreclosure next": Exec("4clojure-next-question"),
        "Foreclosure previous": Exec("4clojure-previous-question"),
        "Foreclosure check": Exec("4clojure-check-answers"),
        "confirm": Text("yes") + Key("enter"),
        "confirm short": Text("y"),
        "deny": Text("no") + Key("enter"),
        "deny short": Text("n"),
        "relative line numbers": Exec("linum-relative-toggle"),
        "buff revert": Exec("revert-buffer"),
        "emacs close now": Key("c-x, c-c"),
    }
    extras = [
        IntegerRef("n", 1, 20),
        IntegerRef("line", 1, 10000),
        Dictation("text"),
    ]
    defaults = {
        "n": 1,
    }


context = linux.UniversalAppContext(title = "Emacs editor")
grammar = Grammar("Emacs", context=context)
grammar.add_rule(CommandRule())
grammar.load()


# Unload function which will be called by natlink at unload time.
def unload():
    global grammar
    if grammar: grammar.unload()
    grammar = None
[ "# (c) Copyright 2015 by James Stout\n", "# Licensed under the LGPL, see <http://www.gnu.org/licenses/>\n", "\n", "from dragonfly import (\n", " Dictation,\n", " Grammar,\n", " IntegerRef,\n", " Key,\n", " MappingRule,\n", " Text,\n", ")\n", "import _linux_utils as linux\n", "\n", "\n", "def Exec(command):\n", " return Key(\"a-x\") + Text(command) + Key(\"enter\")\n", "\n", "\n", "class CommandRule(MappingRule):\n", " mapping = {\n", " \"dragonfly add buffer\": Exec(\"dragonfly-add-buffer\"),\n", " \"dragonfly add word\": Exec(\"dragonfly-add-word\"),\n", " \"dragonfly blacklist word\": Exec(\"dragonfly-blacklist-word\"),\n", " \"Foreclosure next\": Exec(\"4clojure-next-question\"),\n", " \"Foreclosure previous\": Exec(\"4clojure-previous-question\"),\n", " \"Foreclosure check\": Exec(\"4clojure-check-answers\"),\n", " \"confirm\": Text(\"yes\") + Key(\"enter\"),\n", " \"confirm short\": Text(\"y\"),\n", " \"deny\": Text(\"no\") + Key(\"enter\"),\n", " \"deny short\": Text(\"n\"),\n", " \"relative line numbers\": Exec(\"linum-relative-toggle\"),\n", " \"buff revert\": Exec(\"revert-buffer\"),\n", " \"emacs close now\": Key(\"c-x, c-c\"),\n", " }\n", " extras = [\n", " IntegerRef(\"n\", 1, 20),\n", " IntegerRef(\"line\", 1, 10000),\n", " Dictation(\"text\"),\n", " ]\n", " defaults = {\n", " \"n\": 1,\n", " }\n", "\n", "\n", "context = linux.UniversalAppContext(title = \"Emacs editor\")\n", "grammar = Grammar(\"Emacs\", context=context)\n", "grammar.add_rule(CommandRule())\n", "grammar.load()\n", "\n", "\n", "# Unload function which will be called by natlink at unload time.\n", "def unload():\n", " global grammar\n", " if grammar: grammar.unload()\n", " grammar = None\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0 ]
55
0.001157
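Exec() in the record above builds one composite action out of dragonfly's Key and Text primitives joined with "+", which is how every mapping entry spells out an M-x command. The stand-in classes below mimic only that composition idea so the sketch runs without a speech engine; they are not dragonfly's real API.

class FakeAction(object):
    """Stand-in for a dragonfly action: '+' concatenates the steps to execute."""
    def __init__(self, steps):
        self.steps = list(steps)

    def __add__(self, other):
        return FakeAction(self.steps + other.steps)

def Key(spec):
    return FakeAction(["press " + spec])

def Text(spec):
    return FakeAction(["type " + spec])

def Exec(command):
    # Same shape as the record's Exec(): M-x, the command name, then Enter.
    return Key("a-x") + Text(command) + Key("enter")

print(Exec("revert-buffer").steps)
# ['press a-x', 'type revert-buffer', 'press enter']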
# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import, unicode_literals from operator import itemgetter from os import urandom from datetime import timedelta, date from fs.tests import FSTestCases from mock import Mock, call from pytest import fixture, raises, mark from fs.memoryfs import MemoryFS from fs.errors import ResourceNotFoundError, ResourceInvalidError import unittest from cuckoodrive.partedfs import PartedFS, PartedFile, FilePart, InvalidFilePointerLocation from cuckoodrive.utils import kb class TestExternalPartedFS(unittest.TestCase, FSTestCases): def setUp(self): self.fs = PartedFS(MemoryFS(), kb(100)) def tearDown(self): self.fs.close() @mark.xfail(reason="Appending does not work yet") def test_readwriteappendseek(self): super(TestExternalPartedFS, self).test_readwriteappendseek() @mark.xfail(reason="FS is not truncatable") def test_truncate_to_larger_size(self): super(TestExternalPartedFS, self).test_truncate_to_larger_size() @mark.xfail(reason="FS is not truncatable") def test_truncate(self): super(TestExternalPartedFS, self).test_truncate() class TestPartedFS(object): @fixture def fs(self): return PartedFS(MemoryFS(), kb(4)) @fixture def fs_with_folder_structure(self, fs): fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4))) fs.wrapped_fs.setcontents("README.txt.part0", data=urandom(kb(1))) fs.wrapped_fs.makedir("older_backups") return fs @fixture def fs_with_test_file(self, fs): fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4))) return fs def test_encode_should_return_file_with_part0_extension(self, fs): # Arrange path = "backup.tar" # Act encoded_path = fs._encode(path) # Assert assert encoded_path == "backup.tar.part0" def test_encode_appends_given_index_to_extension(self, fs): # Arrange path = "backup.tar" # Act encoded_path = fs._encode(path, part_index=2) # Assert assert encoded_path == "backup.tar.part2" def test_decode_should_return_part_without_part_extension(self, fs): # Arrange encoded_path = "backup.tar.part1" # Act decoded_path = fs._decode(encoded_path) # Assert assert decoded_path == "backup.tar" def test_listparts_returns_all_parts_for_a_path(self, fs_with_test_file): # Act listing = fs_with_test_file.listparts("backup.tar") # Assert assert sorted(listing) == ["backup.tar.part0", "backup.tar.part1"] def test_exists_returns_true_when_first_part_could_be_found(self, fs): # Arrange fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4))) # Act & Assert assert fs.exists("backup.tar") def test_isfile_returns_wether_the_first_part_is_file(self, fs): # Arrange fs.wrapped_fs.isfile = Mock() # Act fs.isfile("backup.tar") # Act fs.wrapped_fs.isfile.assert_called_once_with("backup.tar.part0") def test_exists_returns_false_when_first_part_is_not_found(self, fs): # Act & Assert assert not fs.exists("backup.tar") def test_remove_deletes_all_parts(self, fs): # Arrange fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part2", data=urandom(kb(2))) fs.wrapped_fs.remove = Mock() # Act fs.remove("backup.tar") # Assert fs.wrapped_fs.remove.assert_has_calls([call("backup.tar.part0"), call("backup.tar.part1"), call("backup.tar.part2")], any_order=True) def test_listdir_returns_only_directories(self, fs_with_folder_structure): # Act listing = 
fs_with_folder_structure.listdir(dirs_only=True) # Assert assert listing == ["older_backups"] def test_listdir_returns_only_wildcard_matches(self, fs_with_folder_structure): # Act listing = fs_with_folder_structure.listdir(wildcard="*backup*") # Assert assert listing == ["older_backups", "backup.tar"] def test_listdir_raises_error_if_path_is_file(self, fs_with_folder_structure): # Act & Assert with raises(ResourceInvalidError): fs_with_folder_structure.listdir("README.txt") def test_listdir_returns_only_files(self, fs_with_folder_structure): # Act listing = sorted(fs_with_folder_structure.listdir(files_only=True)) # Assert assert listing == ["README.txt", "backup.tar"] def test_listdir_returns_files_and_directories(self, fs_with_folder_structure): # Act listing = sorted(fs_with_folder_structure.listdir()) # Assert assert listing == ["README.txt", "backup.tar", "older_backups"] def test_listdirinfo_raises_error_when_not_exists(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.listdirinfo("random_dir") def test_listdirinfo_returns_path_and_infodict(self, fs_with_folder_structure): # Arrange info = {} fs_with_folder_structure.getinfo = Mock(return_value=info) # Act listing = sorted(fs_with_folder_structure.listdirinfo(), key=itemgetter(0)) # Assert assert listing == [("README.txt", info), ("backup.tar", info), ("older_backups", info)] def test_open_if_w_in_mode_all_parts_should_be_removed(self, fs_with_test_file): # Arrange fs_with_test_file.remove = Mock() # Act fs_with_test_file.open("backup.tar", mode="w") # Assert fs_with_test_file.remove.assert_called_once_with("backup.tar") def test_open_raises_error_if_w_and_a_not_in_mode(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.open("i_dont_exist", mode="r") def test_open_raises_error_if_path_is_directory(self, fs): # Arrange fs.makedir("backups") # Act & Assert with raises(ResourceInvalidError): fs.open("backups", mode="w") def test_open_raises_error_if_path_does_not_exist(self, fs): # Arrange path = "backup.tar" # Act & Assert with raises(ResourceNotFoundError): fs.open(path, mode="r") with raises(ResourceNotFoundError): fs.open(path, mode="r+") def test_open_creates_empty_file_if_path_does_not_exist(self, fs): # Arrange path = "backup.tar" # Act f = fs.open(path, mode="w") # Assert assert len(f.parts) == 1 def test_open_uses_existing_parts_if_path_exists(self, fs_with_test_file): # Act f = fs_with_test_file.open("backup.tar", mode="r+") # Assert assert len(f.parts) == 2 def test_open_with_existing_parts_opens_them_in_correct_order(self, fs): # Arrange fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4))) fs.wrapped_fs.setcontents("backup.tar.part2", data=urandom(kb(2))) # Act f = fs.open("backup.tar", mode="r+") # Assert created_parts = [part.name for part in f.parts] assert created_parts == ["backup.tar.part0", "backup.tar.part1", "backup.tar.part2"] def test_open_file_in_directory_returns_file(self, fs): # Arrange fs.wrapped_fs.makedir("foo/bar", recursive=True) fs.setcontents("foo/bar/backup.tar", urandom(kb(4))) # Act assert fs.getsize("foo/bar/backup.tar") == kb(4) def test_rename_raises_error_if_not_exists(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.rename("you_cant_name_me", "you_cant_name_me2") def test_rename_renames_all_parts(self, fs_with_test_file): # Arrange fs_with_test_file.wrapped_fs.rename = Mock() # Act fs_with_test_file.rename("backup.tar", "backup2.tar") # Assert 
fs_with_test_file.wrapped_fs.rename.assert_has_calls([ call("backup.tar.part0", "backup2.tar.part0"), call("backup.tar.part1", "backup2.tar.part1")], any_order=True) def test_getsize_returns_sum_of_parts(self, fs_with_test_file): # Act size = fs_with_test_file.getsize("backup.tar") # assert assert size == kb(8) def test_getsize_raises_error_if_not_exists(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.getsize("im_invisible") def test_getinfo_raises_error_if_not_exists(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.getinfo("im_invisible") def test_removedir_calls_underlying_fs(self, fs): # Arrange path = "folder" fs.wrapped_fs.removedir = Mock() # Act fs.removedir(path) # Arrange fs.wrapped_fs.removedir.assert_called_once_with(path) def test_isdir_calls_underyling_fs(self, fs): # Arrange path = "/" fs.wrapped_fs.isdir = Mock() # Act fs.isdir(path) # Arrange fs.wrapped_fs.isdir.assert_called_once_with(path) def test_makedir_calls_underyling_fs(self, fs): # Arrange path = "folder" fs.wrapped_fs.makedir = Mock() # Act fs.makedir(path) # Arrange fs.wrapped_fs.makedir.assert_called_once_with(path) def test_getinfo_for_root_returns_information(self, fs): # Act info = fs.getinfo("/") # Assert assert "created_time" in info assert "modified_time" in info assert "accessed_time" in info def test_getinfo_returns_directory_info_for_dir(self, fs): # Arrange created = date.today() + timedelta(days=10) accessed = date.today() + timedelta(days=10) modfied = date.today() + timedelta(days=10) fs.makedir("dir") fs.wrapped_fs.getinfo = Mock(return_value={ "created_time": created, "modified_time": accessed, "accessed_time": modfied}) # Act info = fs.getinfo("dir") # Assert assert info["created_time"] == created assert info["modified_time"] == accessed assert info["accessed_time"] == modfied def test_getinfo_returns_latest_times(self, fs_with_test_file): # Arrange created_max = date.today() + timedelta(days=10) accessed_max = date.today() + timedelta(days=10) modfied_max = date.today() + timedelta(days=10) def getinfo_patch(path): if path == "backup.tar.part0": return {"created_time": created_max, "modified_time": date.today(), "accessed_time": accessed_max} else: return {"created_time": date.today(), "modified_time": modfied_max, "accessed_time": date.today()} fs_with_test_file.wrapped_fs.getinfo = getinfo_patch fs_with_test_file.getsize = lambda p: kb(7) # Act info = fs_with_test_file.getinfo("backup.tar") # Assert assert info["created_time"] == created_max assert info["accessed_time"] == accessed_max assert info["modified_time"] == modfied_max def test_getinfo_returns_info_of_parts(self, fs_with_test_file): # Act info = fs_with_test_file.getinfo("backup.tar") part_infos = info['parts'] # Assert assert len(part_infos) == 2 def test_getinfo_returns_correct_size(self, fs_with_test_file): # Act info = fs_with_test_file.getinfo("backup.tar") # Assert assert info["size"] == kb(8) def test_copy_raises_error_if_not_exists(self, fs): # Act & Assert with raises(ResourceNotFoundError): fs.getinfo("copy_me_if_you_can") def test_copy_copies_the_parts(self, fs_with_test_file): # Arrange fs_with_test_file.makedir("copy_folder") fs_with_test_file.wrapped_fs.copy = Mock() # Act fs_with_test_file.copy("backup.tar", "copy_folder/backup.tar") # Assert fs_with_test_file.wrapped_fs.copy.assert_has_calls([ call("backup.tar.part0", "copy_folder/backup.tar.part0"), call("backup.tar.part1", "copy_folder/backup.tar.part1")], any_order=True) def test_setcontents_creates_file(self, fs): # Act 
fs.setcontents("backup.tar", urandom(kb(6))) # Assert assert fs.getsize("backup.tar") == kb(6) def test_getcontents_reads_file(self, fs): # Arrange data = urandom(kb(6)) fs.setcontents("backup.tar", data) # Act saved_data = fs.getcontents("backup.tar") # Assert assert saved_data == data class TestPartedFile(object): @fixture def parted_file(self): fs = MemoryFS() mode = "wb+" path = "cuckoo.tar" parts = [FilePart(fs.open("cuckoo.tar.part0", mode)), (fs.open("cuckoo.tar.part1", mode))] return PartedFile(path=path, mode=mode, fs=fs, max_part_size=kb(4), parts=parts) def test_current_part_returns_first_part_when_file_pointer_is_zero(self, parted_file): # Arrange parted_file._file_pointer = 0 # Act & Assert assert parted_file.current_part == parted_file.parts[0] def test_current_part_returns_last_part_when_file_pointer_is_max_part_size(self, parted_file): # Arrange parted_file._file_pointer = kb(4) # Act & Assert assert parted_file.current_part == parted_file.parts[1] def test_current_part_raises_error_when_file_pointer_is_bigger_than_parts(self, parted_file): # Arrange parted_file._mode = "r" parted_file._file_pointer = 4 * kb(4) # Act & Assert with raises(InvalidFilePointerLocation): _ = parted_file.current_part def test_write_returns_none_if_all_data_could_be_written(self, parted_file): # Act unwritten_data = parted_file._write(urandom(kb(4))) # Assert assert unwritten_data is None def test_write_returns_data_that_is_bigger_than_max_part_size(self, parted_file): # Act unwritten_data = parted_file._write(urandom(kb(5))) # Assert assert len(unwritten_data) == kb(1) def test_write_with_flushing_mode_calls_itself_until_all_data_is_written(self, parted_file): # Act unwritten_data = parted_file._write(urandom(kb(5)), flushing=True) # Assert assert unwritten_data is None def test_write_sets_file_pointer_to_next_free_position(self, parted_file): # Act parted_file._write(urandom(kb(4))) # Assert assert parted_file._file_pointer == kb(4) def test_write_big_amount_expands_to_parts(self, parted_file): # Act parted_file._write(urandom(kb(12)), flushing=True) # Assert assert len(parted_file.parts) == 3 def test_seek_absolute_should_set_filepointer_to_offset(self, parted_file): # Arrange parted_file._file_pointer = kb(1) # Act parted_file._seek(offset=kb(0), whence=0) # Assert assert parted_file._file_pointer == kb(0) def test_seek_goes_to_current_part_and_sets_other_parts_to_start(self, parted_file): # Arrange parted_file.parts[0].seek = Mock() parted_file.parts[1].seek = Mock() # Act parted_file._seek(offset=kb(5), whence=0) # Assert parted_file.parts[0].seek.assert_called_once_with(kb(0), 0) parted_file.parts[1].seek.assert_called_once_with(kb(1), 0) def test_seek_relative_should_add_ofset_to_filepointer(self, parted_file): # Arrange parted_file._file_pointer = kb(1) # Act parted_file._seek(offset=kb(1), whence=1) # Assert assert parted_file._file_pointer == kb(2) @mark.xfail def test_seek_relative_to_end_should_set_filepointer_to_last_part(self, parted_file): # Act parted_file._seek(offset=-kb(4), whence=2) # Assert assert parted_file._file_pointer == kb(4) def test_tell_returns_file_pointer(self, parted_file): # Arrange parted_file._file_pointer = kb(2) # Act pos = parted_file._tell() # Assert assert pos == kb(2) def test_read_returns_data_from_current_part_and_calls_itself_for_next_part(self, parted_file): # Arrange parted_file._write(urandom(kb(5)), flushing=True) parted_file._seek(offset=kb(3), whence=0) # Act read_data = parted_file._read() # Assert assert len(read_data) == kb(2) def 
test_read_returns_data_from_current_part_in_chunks(self, parted_file): # Arrange parted_file._write(urandom(kb(5)), flushing=True) parted_file._seek(offset=kb(3), whence=0) # Act chunk1 = parted_file._read(kb(1)) chunk2 = parted_file._read(kb(1)) # Assert assert len(chunk1) == kb(1) assert len(chunk2) == kb(1) def test_read_returns_only_data_of_current_part_with_bigger_sizehint(self, parted_file): # Arrange parted_file._write(urandom(kb(5)), flushing=True) parted_file._seek(offset=kb(3), whence=0) # Act chunk = parted_file._read(kb(2)) # Assert assert len(chunk) == kb(1) def test_read_returns_none_after_read_all(self, parted_file): # Arrange parted_file._write(urandom(kb(5)), flushing=True) parted_file._seek(offset=0, whence=0) parted_file._read() # Act eof = parted_file._read() # Assert assert eof is None def test_read_as_chunks_returns_none_at_end_of_file(self, parted_file): # Arrange parted_file._write(urandom(kb(5)), flushing=True) parted_file._seek(offset=kb(4), whence=0) parted_file._read(kb(1)) # Act eof = parted_file._read(kb(1)) # Assert assert eof is None def test_close_calls_super_for_flush_and_closes_all_parts(self, parted_file): # Arrange parted_file._write(urandom(kb(4))) parted_file.parts[0].close = Mock() parted_file.parts[1].close = Mock() # Act parted_file.close() # Assert parted_file.parts[0].close.assert_called_with() parted_file.parts[1].close.assert_called_with()
[ "# -*- coding: utf-8 -*-\n", "from __future__ import print_function, division, absolute_import, unicode_literals\n", "from operator import itemgetter\n", "from os import urandom\n", "from datetime import timedelta, date\n", "from fs.tests import FSTestCases\n", "\n", "from mock import Mock, call\n", "from pytest import fixture, raises, mark\n", "\n", "from fs.memoryfs import MemoryFS\n", "from fs.errors import ResourceNotFoundError, ResourceInvalidError\n", "import unittest\n", "\n", "from cuckoodrive.partedfs import PartedFS, PartedFile, FilePart, InvalidFilePointerLocation\n", "from cuckoodrive.utils import kb\n", "\n", "\n", "class TestExternalPartedFS(unittest.TestCase, FSTestCases):\n", " def setUp(self):\n", " self.fs = PartedFS(MemoryFS(), kb(100))\n", "\n", " def tearDown(self):\n", " self.fs.close()\n", "\n", " @mark.xfail(reason=\"Appending does not work yet\")\n", " def test_readwriteappendseek(self):\n", " super(TestExternalPartedFS, self).test_readwriteappendseek()\n", "\n", " @mark.xfail(reason=\"FS is not truncatable\")\n", " def test_truncate_to_larger_size(self):\n", " super(TestExternalPartedFS, self).test_truncate_to_larger_size()\n", "\n", " @mark.xfail(reason=\"FS is not truncatable\")\n", " def test_truncate(self):\n", " super(TestExternalPartedFS, self).test_truncate()\n", "\n", "\n", "class TestPartedFS(object):\n", " @fixture\n", " def fs(self):\n", " return PartedFS(MemoryFS(), kb(4))\n", "\n", " @fixture\n", " def fs_with_folder_structure(self, fs):\n", " fs.wrapped_fs.setcontents(\"backup.tar.part0\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part1\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"README.txt.part0\", data=urandom(kb(1)))\n", " fs.wrapped_fs.makedir(\"older_backups\")\n", " return fs\n", "\n", " @fixture\n", " def fs_with_test_file(self, fs):\n", " fs.wrapped_fs.setcontents(\"backup.tar.part0\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part1\", data=urandom(kb(4)))\n", " return fs\n", "\n", " def test_encode_should_return_file_with_part0_extension(self, fs):\n", " # Arrange\n", " path = \"backup.tar\"\n", " # Act\n", " encoded_path = fs._encode(path)\n", " # Assert\n", " assert encoded_path == \"backup.tar.part0\"\n", "\n", " def test_encode_appends_given_index_to_extension(self, fs):\n", " # Arrange\n", " path = \"backup.tar\"\n", " # Act\n", " encoded_path = fs._encode(path, part_index=2)\n", " # Assert\n", " assert encoded_path == \"backup.tar.part2\"\n", "\n", " def test_decode_should_return_part_without_part_extension(self, fs):\n", " # Arrange\n", " encoded_path = \"backup.tar.part1\"\n", " # Act\n", " decoded_path = fs._decode(encoded_path)\n", " # Assert\n", " assert decoded_path == \"backup.tar\"\n", "\n", " def test_listparts_returns_all_parts_for_a_path(self, fs_with_test_file):\n", " # Act\n", " listing = fs_with_test_file.listparts(\"backup.tar\")\n", " # Assert\n", " assert sorted(listing) == [\"backup.tar.part0\", \"backup.tar.part1\"]\n", "\n", " def test_exists_returns_true_when_first_part_could_be_found(self, fs):\n", " # Arrange\n", " fs.wrapped_fs.setcontents(\"backup.tar.part0\", data=urandom(kb(4)))\n", " # Act & Assert\n", " assert fs.exists(\"backup.tar\")\n", "\n", " def test_isfile_returns_wether_the_first_part_is_file(self, fs):\n", " # Arrange\n", " fs.wrapped_fs.isfile = Mock()\n", " # Act\n", " fs.isfile(\"backup.tar\")\n", " # Act\n", " fs.wrapped_fs.isfile.assert_called_once_with(\"backup.tar.part0\")\n", "\n", " def 
test_exists_returns_false_when_first_part_is_not_found(self, fs):\n", " # Act & Assert\n", " assert not fs.exists(\"backup.tar\")\n", "\n", " def test_remove_deletes_all_parts(self, fs):\n", " # Arrange\n", " fs.wrapped_fs.setcontents(\"backup.tar.part0\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part1\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part2\", data=urandom(kb(2)))\n", " fs.wrapped_fs.remove = Mock()\n", " # Act\n", " fs.remove(\"backup.tar\")\n", " # Assert\n", " fs.wrapped_fs.remove.assert_has_calls([call(\"backup.tar.part0\"),\n", " call(\"backup.tar.part1\"),\n", " call(\"backup.tar.part2\")], any_order=True)\n", "\n", " def test_listdir_returns_only_directories(self, fs_with_folder_structure):\n", " # Act\n", " listing = fs_with_folder_structure.listdir(dirs_only=True)\n", " # Assert\n", " assert listing == [\"older_backups\"]\n", "\n", " def test_listdir_returns_only_wildcard_matches(self, fs_with_folder_structure):\n", " # Act\n", " listing = fs_with_folder_structure.listdir(wildcard=\"*backup*\")\n", " # Assert\n", " assert listing == [\"older_backups\", \"backup.tar\"]\n", "\n", " def test_listdir_raises_error_if_path_is_file(self, fs_with_folder_structure):\n", " # Act & Assert\n", " with raises(ResourceInvalidError):\n", " fs_with_folder_structure.listdir(\"README.txt\")\n", "\n", " def test_listdir_returns_only_files(self, fs_with_folder_structure):\n", " # Act\n", " listing = sorted(fs_with_folder_structure.listdir(files_only=True))\n", " # Assert\n", " assert listing == [\"README.txt\", \"backup.tar\"]\n", "\n", " def test_listdir_returns_files_and_directories(self, fs_with_folder_structure):\n", " # Act\n", " listing = sorted(fs_with_folder_structure.listdir())\n", " # Assert\n", " assert listing == [\"README.txt\", \"backup.tar\", \"older_backups\"]\n", "\n", " def test_listdirinfo_raises_error_when_not_exists(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.listdirinfo(\"random_dir\")\n", "\n", " def test_listdirinfo_returns_path_and_infodict(self, fs_with_folder_structure):\n", " # Arrange\n", " info = {}\n", " fs_with_folder_structure.getinfo = Mock(return_value=info)\n", " # Act\n", " listing = sorted(fs_with_folder_structure.listdirinfo(), key=itemgetter(0))\n", " # Assert\n", " assert listing == [(\"README.txt\", info), (\"backup.tar\", info), (\"older_backups\", info)]\n", "\n", " def test_open_if_w_in_mode_all_parts_should_be_removed(self, fs_with_test_file):\n", " # Arrange\n", " fs_with_test_file.remove = Mock()\n", " # Act\n", " fs_with_test_file.open(\"backup.tar\", mode=\"w\")\n", " # Assert\n", " fs_with_test_file.remove.assert_called_once_with(\"backup.tar\")\n", "\n", " def test_open_raises_error_if_w_and_a_not_in_mode(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.open(\"i_dont_exist\", mode=\"r\")\n", "\n", " def test_open_raises_error_if_path_is_directory(self, fs):\n", " # Arrange\n", " fs.makedir(\"backups\")\n", " # Act & Assert\n", " with raises(ResourceInvalidError):\n", " fs.open(\"backups\", mode=\"w\")\n", "\n", " def test_open_raises_error_if_path_does_not_exist(self, fs):\n", " # Arrange\n", " path = \"backup.tar\"\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.open(path, mode=\"r\")\n", " with raises(ResourceNotFoundError):\n", " fs.open(path, mode=\"r+\")\n", "\n", " def test_open_creates_empty_file_if_path_does_not_exist(self, fs):\n", " # Arrange\n", " path = \"backup.tar\"\n", " # Act\n", 
" f = fs.open(path, mode=\"w\")\n", " # Assert\n", " assert len(f.parts) == 1\n", "\n", " def test_open_uses_existing_parts_if_path_exists(self, fs_with_test_file):\n", " # Act\n", " f = fs_with_test_file.open(\"backup.tar\", mode=\"r+\")\n", " # Assert\n", " assert len(f.parts) == 2\n", "\n", " def test_open_with_existing_parts_opens_them_in_correct_order(self, fs):\n", " # Arrange\n", " fs.wrapped_fs.setcontents(\"backup.tar.part0\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part1\", data=urandom(kb(4)))\n", " fs.wrapped_fs.setcontents(\"backup.tar.part2\", data=urandom(kb(2)))\n", " # Act\n", " f = fs.open(\"backup.tar\", mode=\"r+\")\n", " # Assert\n", " created_parts = [part.name for part in f.parts]\n", " assert created_parts == [\"backup.tar.part0\", \"backup.tar.part1\", \"backup.tar.part2\"]\n", "\n", " def test_open_file_in_directory_returns_file(self, fs):\n", " # Arrange\n", " fs.wrapped_fs.makedir(\"foo/bar\", recursive=True)\n", " fs.setcontents(\"foo/bar/backup.tar\", urandom(kb(4)))\n", " # Act\n", " assert fs.getsize(\"foo/bar/backup.tar\") == kb(4)\n", "\n", " def test_rename_raises_error_if_not_exists(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.rename(\"you_cant_name_me\", \"you_cant_name_me2\")\n", "\n", " def test_rename_renames_all_parts(self, fs_with_test_file):\n", " # Arrange\n", " fs_with_test_file.wrapped_fs.rename = Mock()\n", " # Act\n", " fs_with_test_file.rename(\"backup.tar\", \"backup2.tar\")\n", " # Assert\n", " fs_with_test_file.wrapped_fs.rename.assert_has_calls([\n", " call(\"backup.tar.part0\", \"backup2.tar.part0\"),\n", " call(\"backup.tar.part1\", \"backup2.tar.part1\")], any_order=True)\n", "\n", " def test_getsize_returns_sum_of_parts(self, fs_with_test_file):\n", " # Act\n", " size = fs_with_test_file.getsize(\"backup.tar\")\n", " # assert\n", " assert size == kb(8)\n", "\n", " def test_getsize_raises_error_if_not_exists(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.getsize(\"im_invisible\")\n", "\n", " def test_getinfo_raises_error_if_not_exists(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.getinfo(\"im_invisible\")\n", "\n", " def test_removedir_calls_underlying_fs(self, fs):\n", " # Arrange\n", " path = \"folder\"\n", " fs.wrapped_fs.removedir = Mock()\n", " # Act\n", " fs.removedir(path)\n", " # Arrange\n", " fs.wrapped_fs.removedir.assert_called_once_with(path)\n", "\n", " def test_isdir_calls_underyling_fs(self, fs):\n", " # Arrange\n", " path = \"/\"\n", " fs.wrapped_fs.isdir = Mock()\n", " # Act\n", " fs.isdir(path)\n", " # Arrange\n", " fs.wrapped_fs.isdir.assert_called_once_with(path)\n", "\n", " def test_makedir_calls_underyling_fs(self, fs):\n", " # Arrange\n", " path = \"folder\"\n", " fs.wrapped_fs.makedir = Mock()\n", " # Act\n", " fs.makedir(path)\n", " # Arrange\n", " fs.wrapped_fs.makedir.assert_called_once_with(path)\n", "\n", " def test_getinfo_for_root_returns_information(self, fs):\n", " # Act\n", " info = fs.getinfo(\"/\")\n", " # Assert\n", " assert \"created_time\" in info\n", " assert \"modified_time\" in info\n", " assert \"accessed_time\" in info\n", "\n", " def test_getinfo_returns_directory_info_for_dir(self, fs):\n", " # Arrange\n", " created = date.today() + timedelta(days=10)\n", " accessed = date.today() + timedelta(days=10)\n", " modfied = date.today() + timedelta(days=10)\n", "\n", " fs.makedir(\"dir\")\n", " fs.wrapped_fs.getinfo = Mock(return_value={\n", " \"created_time\": 
created,\n", " \"modified_time\": accessed,\n", " \"accessed_time\": modfied})\n", " # Act\n", " info = fs.getinfo(\"dir\")\n", " # Assert\n", " assert info[\"created_time\"] == created\n", " assert info[\"modified_time\"] == accessed\n", " assert info[\"accessed_time\"] == modfied\n", "\n", " def test_getinfo_returns_latest_times(self, fs_with_test_file):\n", " # Arrange\n", " created_max = date.today() + timedelta(days=10)\n", " accessed_max = date.today() + timedelta(days=10)\n", " modfied_max = date.today() + timedelta(days=10)\n", "\n", " def getinfo_patch(path):\n", " if path == \"backup.tar.part0\":\n", " return {\"created_time\": created_max,\n", " \"modified_time\": date.today(),\n", " \"accessed_time\": accessed_max}\n", " else:\n", " return {\"created_time\": date.today(),\n", " \"modified_time\": modfied_max,\n", " \"accessed_time\": date.today()}\n", "\n", " fs_with_test_file.wrapped_fs.getinfo = getinfo_patch\n", " fs_with_test_file.getsize = lambda p: kb(7)\n", " # Act\n", " info = fs_with_test_file.getinfo(\"backup.tar\")\n", " # Assert\n", " assert info[\"created_time\"] == created_max\n", " assert info[\"accessed_time\"] == accessed_max\n", " assert info[\"modified_time\"] == modfied_max\n", "\n", " def test_getinfo_returns_info_of_parts(self, fs_with_test_file):\n", " # Act\n", " info = fs_with_test_file.getinfo(\"backup.tar\")\n", " part_infos = info['parts']\n", " # Assert\n", " assert len(part_infos) == 2\n", "\n", " def test_getinfo_returns_correct_size(self, fs_with_test_file):\n", " # Act\n", " info = fs_with_test_file.getinfo(\"backup.tar\")\n", " # Assert\n", " assert info[\"size\"] == kb(8)\n", "\n", " def test_copy_raises_error_if_not_exists(self, fs):\n", " # Act & Assert\n", " with raises(ResourceNotFoundError):\n", " fs.getinfo(\"copy_me_if_you_can\")\n", "\n", " def test_copy_copies_the_parts(self, fs_with_test_file):\n", " # Arrange\n", " fs_with_test_file.makedir(\"copy_folder\")\n", " fs_with_test_file.wrapped_fs.copy = Mock()\n", " # Act\n", " fs_with_test_file.copy(\"backup.tar\", \"copy_folder/backup.tar\")\n", " # Assert\n", " fs_with_test_file.wrapped_fs.copy.assert_has_calls([\n", " call(\"backup.tar.part0\", \"copy_folder/backup.tar.part0\"),\n", " call(\"backup.tar.part1\", \"copy_folder/backup.tar.part1\")], any_order=True)\n", "\n", " def test_setcontents_creates_file(self, fs):\n", " # Act\n", " fs.setcontents(\"backup.tar\", urandom(kb(6)))\n", " # Assert\n", " assert fs.getsize(\"backup.tar\") == kb(6)\n", "\n", " def test_getcontents_reads_file(self, fs):\n", " # Arrange\n", " data = urandom(kb(6))\n", " fs.setcontents(\"backup.tar\", data)\n", " # Act\n", " saved_data = fs.getcontents(\"backup.tar\")\n", " # Assert\n", " assert saved_data == data\n", "\n", "\n", "class TestPartedFile(object):\n", " @fixture\n", " def parted_file(self):\n", " fs = MemoryFS()\n", " mode = \"wb+\"\n", " path = \"cuckoo.tar\"\n", " parts = [FilePart(fs.open(\"cuckoo.tar.part0\", mode)), (fs.open(\"cuckoo.tar.part1\", mode))]\n", " return PartedFile(path=path, mode=mode, fs=fs, max_part_size=kb(4), parts=parts)\n", "\n", " def test_current_part_returns_first_part_when_file_pointer_is_zero(self, parted_file):\n", " # Arrange\n", " parted_file._file_pointer = 0\n", " # Act & Assert\n", " assert parted_file.current_part == parted_file.parts[0]\n", "\n", " def test_current_part_returns_last_part_when_file_pointer_is_max_part_size(self, parted_file):\n", " # Arrange\n", " parted_file._file_pointer = kb(4)\n", " # Act & Assert\n", " assert parted_file.current_part == 
parted_file.parts[1]\n", "\n", " def test_current_part_raises_error_when_file_pointer_is_bigger_than_parts(self, parted_file):\n", " # Arrange\n", " parted_file._mode = \"r\"\n", " parted_file._file_pointer = 4 * kb(4)\n", " # Act & Assert\n", " with raises(InvalidFilePointerLocation):\n", " _ = parted_file.current_part\n", "\n", " def test_write_returns_none_if_all_data_could_be_written(self, parted_file):\n", " # Act\n", " unwritten_data = parted_file._write(urandom(kb(4)))\n", " # Assert\n", " assert unwritten_data is None\n", "\n", " def test_write_returns_data_that_is_bigger_than_max_part_size(self, parted_file):\n", " # Act\n", " unwritten_data = parted_file._write(urandom(kb(5)))\n", " # Assert\n", " assert len(unwritten_data) == kb(1)\n", "\n", " def test_write_with_flushing_mode_calls_itself_until_all_data_is_written(self, parted_file):\n", " # Act\n", " unwritten_data = parted_file._write(urandom(kb(5)), flushing=True)\n", " # Assert\n", " assert unwritten_data is None\n", "\n", " def test_write_sets_file_pointer_to_next_free_position(self, parted_file):\n", " # Act\n", " parted_file._write(urandom(kb(4)))\n", " # Assert\n", " assert parted_file._file_pointer == kb(4)\n", "\n", " def test_write_big_amount_expands_to_parts(self, parted_file):\n", " # Act\n", " parted_file._write(urandom(kb(12)), flushing=True)\n", " # Assert\n", " assert len(parted_file.parts) == 3\n", "\n", " def test_seek_absolute_should_set_filepointer_to_offset(self, parted_file):\n", " # Arrange\n", " parted_file._file_pointer = kb(1)\n", " # Act\n", " parted_file._seek(offset=kb(0), whence=0)\n", " # Assert\n", " assert parted_file._file_pointer == kb(0)\n", "\n", " def test_seek_goes_to_current_part_and_sets_other_parts_to_start(self, parted_file):\n", " # Arrange\n", " parted_file.parts[0].seek = Mock()\n", " parted_file.parts[1].seek = Mock()\n", " # Act\n", " parted_file._seek(offset=kb(5), whence=0)\n", " # Assert\n", " parted_file.parts[0].seek.assert_called_once_with(kb(0), 0)\n", " parted_file.parts[1].seek.assert_called_once_with(kb(1), 0)\n", "\n", " def test_seek_relative_should_add_ofset_to_filepointer(self, parted_file):\n", " # Arrange\n", " parted_file._file_pointer = kb(1)\n", " # Act\n", " parted_file._seek(offset=kb(1), whence=1)\n", " # Assert\n", " assert parted_file._file_pointer == kb(2)\n", "\n", " @mark.xfail\n", " def test_seek_relative_to_end_should_set_filepointer_to_last_part(self, parted_file):\n", " # Act\n", " parted_file._seek(offset=-kb(4), whence=2)\n", " # Assert\n", " assert parted_file._file_pointer == kb(4)\n", "\n", " def test_tell_returns_file_pointer(self, parted_file):\n", " # Arrange\n", " parted_file._file_pointer = kb(2)\n", " # Act\n", " pos = parted_file._tell()\n", " # Assert\n", " assert pos == kb(2)\n", "\n", " def test_read_returns_data_from_current_part_and_calls_itself_for_next_part(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(5)), flushing=True)\n", " parted_file._seek(offset=kb(3), whence=0)\n", " # Act\n", " read_data = parted_file._read()\n", " # Assert\n", " assert len(read_data) == kb(2)\n", "\n", " def test_read_returns_data_from_current_part_in_chunks(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(5)), flushing=True)\n", " parted_file._seek(offset=kb(3), whence=0)\n", " # Act\n", " chunk1 = parted_file._read(kb(1))\n", " chunk2 = parted_file._read(kb(1))\n", " # Assert\n", " assert len(chunk1) == kb(1)\n", " assert len(chunk2) == kb(1)\n", "\n", " def 
test_read_returns_only_data_of_current_part_with_bigger_sizehint(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(5)), flushing=True)\n", " parted_file._seek(offset=kb(3), whence=0)\n", " # Act\n", " chunk = parted_file._read(kb(2))\n", " # Assert\n", " assert len(chunk) == kb(1)\n", "\n", " def test_read_returns_none_after_read_all(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(5)), flushing=True)\n", " parted_file._seek(offset=0, whence=0)\n", " parted_file._read()\n", " # Act\n", " eof = parted_file._read()\n", " # Assert\n", " assert eof is None\n", "\n", " def test_read_as_chunks_returns_none_at_end_of_file(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(5)), flushing=True)\n", " parted_file._seek(offset=kb(4), whence=0)\n", " parted_file._read(kb(1))\n", " # Act\n", " eof = parted_file._read(kb(1))\n", " # Assert\n", " assert eof is None\n", "\n", " def test_close_calls_super_for_flush_and_closes_all_parts(self, parted_file):\n", " # Arrange\n", " parted_file._write(urandom(kb(4)))\n", " parted_file.parts[0].close = Mock()\n", " parted_file.parts[1].close = Mock()\n", " # Act\n", " parted_file.close()\n", " # Assert\n", " parted_file.parts[0].close.assert_called_with()\n", " parted_file.parts[1].close.assert_called_with()\n" ]
[ 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0.011904761904761904, 0, 0.010416666666666666, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0.011235955056179775, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
535
0.000524
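The path-handling behaviour the PartedFS tests above pin down is simple: a logical file maps onto numbered ".partN" files on the wrapped filesystem. A minimal sketch of that naming scheme, mirroring the asserts in the test_encode_* and test_decode_* cases (the helpers here are illustrative, not PartedFS's methods):

def encode(path, part_index=0):
    """Logical path -> name of one physical part file."""
    return "{0}.part{1}".format(path, part_index)

def decode(encoded_path):
    """Physical part file name -> logical path."""
    base, _, _ = encoded_path.rpartition(".part")
    return base

assert encode("backup.tar") == "backup.tar.part0"
assert encode("backup.tar", part_index=2) == "backup.tar.part2"
assert decode("backup.tar.part1") == "backup.tar"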
""" WE'RE USING MIGRATIONS! If you make changes to this model, be sure to create an appropriate migration file and check it in at the same time as your model changes. To do that, 1. Go to the edx-platform dir 2. ./manage.py schemamigration courseware --auto description_of_your_change 3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/ ASSUMPTIONS: modules have unique IDs, even across different module_types """ import logging import itertools from django.contrib.auth.models import User from django.conf import settings from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver, Signal from model_utils.models import TimeStampedModel from student.models import user_by_anonymous_id from submissions.models import score_set, score_reset from xmodule_django.models import CourseKeyField, LocationKeyField, BlockTypeKeyField # pylint: disable=import-error log = logging.getLogger("edx.courseware") def chunks(items, chunk_size): """ Yields the values from items in chunks of size chunk_size """ items = list(items) return (items[i:i + chunk_size] for i in xrange(0, len(items), chunk_size)) class ChunkingManager(models.Manager): """ :class:`~Manager` that adds an additional method :meth:`chunked_filter` to provide the ability to make select queries with specific chunk sizes. """ def chunked_filter(self, chunk_field, items, **kwargs): """ Queries model_class with `chunk_field` set to chunks of size `chunk_size`, and all other parameters from `**kwargs`. This works around a limitation in sqlite3 on the number of parameters that can be put into a single query. Arguments: chunk_field (str): The name of the field to chunk the query on. items: The values for of chunk_field to select. This is chunked into ``chunk_size`` chunks, and passed as the value for the ``chunk_field`` keyword argument to :meth:`~Manager.filter`. This implies that ``chunk_field`` should be an ``__in`` key. chunk_size (int): The size of chunks to pass. Defaults to 500. """ chunk_size = kwargs.pop('chunk_size', 500) res = itertools.chain.from_iterable( self.filter(**dict([(chunk_field, chunk)] + kwargs.items())) for chunk in chunks(items, chunk_size) ) return res class StudentModule(models.Model): """ Keeps student state for a particular module in a particular course. """ objects = ChunkingManager() MODEL_TAGS = ['course_id', 'module_type'] # For a homework problem, contains a JSON # object consisting of state MODULE_TYPES = (('problem', 'problem'), ('video', 'video'), ('html', 'html'), ('course', 'course'), ('chapter', 'Section'), ('sequential', 'Subsection'), ('library_content', 'Library Content')) ## These three are the key for the object module_type = models.CharField(max_length=32, choices=MODULE_TYPES, default='problem', db_index=True) # Key used to share state. This is the XBlock usage_id module_state_key = LocationKeyField(max_length=255, db_index=True, db_column='module_id') student = models.ForeignKey(User, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) class Meta(object): # pylint: disable=missing-docstring unique_together = (('student', 'module_state_key', 'course_id'),) ## Internal state of the object state = models.TextField(null=True, blank=True) ## Grade, and are we done? 
grade = models.FloatField(null=True, blank=True, db_index=True) max_grade = models.FloatField(null=True, blank=True) DONE_TYPES = ( ('na', 'NOT_APPLICABLE'), ('f', 'FINISHED'), ('i', 'INCOMPLETE'), ) done = models.CharField(max_length=8, choices=DONE_TYPES, default='na', db_index=True) created = models.DateTimeField(auto_now_add=True, db_index=True) modified = models.DateTimeField(auto_now=True, db_index=True) @classmethod def all_submitted_problems_read_only(cls, course_id): """ Return all model instances that correspond to problems that have been submitted for a given course. So module_type='problem' and a non-null grade. Use a read replica if one exists for this environment. """ queryset = cls.objects.filter( course_id=course_id, module_type='problem', grade__isnull=False ) if "read_replica" in settings.DATABASES: return queryset.using("read_replica") else: return queryset def __repr__(self): return 'StudentModule<%r>' % ({ 'course_id': self.course_id, 'module_type': self.module_type, 'student': self.student.username, # pylint: disable=no-member 'module_state_key': self.module_state_key, 'state': str(self.state)[:20], },) def __unicode__(self): return unicode(repr(self)) class StudentModuleHistory(models.Model): """Keeps a complete history of state changes for a given XModule for a given Student. Right now, we restrict this to problems so that the table doesn't explode in size.""" HISTORY_SAVING_TYPES = {'problem'} class Meta(object): # pylint: disable=missing-docstring get_latest_by = "created" student_module = models.ForeignKey(StudentModule, db_index=True) version = models.CharField(max_length=255, null=True, blank=True, db_index=True) # This should be populated from the modified field in StudentModule created = models.DateTimeField(db_index=True) state = models.TextField(null=True, blank=True) grade = models.FloatField(null=True, blank=True) max_grade = models.FloatField(null=True, blank=True) @receiver(post_save, sender=StudentModule) def save_history(sender, instance, **kwargs): # pylint: disable=no-self-argument, unused-argument """ Checks the instance's module_type, and creates & saves a StudentModuleHistory entry if the module_type is one that we save. """ if instance.module_type in StudentModuleHistory.HISTORY_SAVING_TYPES: history_entry = StudentModuleHistory(student_module=instance, version=None, created=instance.modified, state=instance.state, grade=instance.grade, max_grade=instance.max_grade) history_entry.save() class XBlockFieldBase(models.Model): """ Base class for all XBlock field storage. """ objects = ChunkingManager() class Meta(object): # pylint: disable=missing-docstring abstract = True # The name of the field field_name = models.CharField(max_length=64, db_index=True) # The value of the field. 
Defaults to None dumped as json value = models.TextField(default='null') created = models.DateTimeField(auto_now_add=True, db_index=True) modified = models.DateTimeField(auto_now=True, db_index=True) def __unicode__(self): return u'{}<{!r}'.format( self.__class__.__name__, { key: getattr(self, key) for key in self._meta.get_all_field_names() if key not in ('created', 'modified') } ) class XModuleUserStateSummaryField(XBlockFieldBase): """ Stores data set in the Scope.user_state_summary scope by an xmodule field """ class Meta(object): # pylint: disable=missing-docstring unique_together = (('usage_id', 'field_name'),) # The definition id for the module usage_id = LocationKeyField(max_length=255, db_index=True) class XModuleStudentPrefsField(XBlockFieldBase): """ Stores data set in the Scope.preferences scope by an xmodule field """ class Meta(object): # pylint: disable=missing-docstring unique_together = (('student', 'module_type', 'field_name'),) # The type of the module for these preferences module_type = BlockTypeKeyField(max_length=64, db_index=True) student = models.ForeignKey(User, db_index=True) class XModuleStudentInfoField(XBlockFieldBase): """ Stores data set in the Scope.preferences scope by an xmodule field """ class Meta(object): # pylint: disable=missing-docstring unique_together = (('student', 'field_name'),) student = models.ForeignKey(User, db_index=True) class OfflineComputedGrade(models.Model): """ Table of grades computed offline for a given user and course. """ user = models.ForeignKey(User, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) updated = models.DateTimeField(auto_now=True, db_index=True) gradeset = models.TextField(null=True, blank=True) # grades, stored as JSON class Meta(object): # pylint: disable=missing-docstring unique_together = (('user', 'course_id'), ) def __unicode__(self): return "[OfflineComputedGrade] %s: %s (%s) = %s" % (self.user, self.course_id, self.created, self.gradeset) class OfflineComputedGradeLog(models.Model): """ Log of when offline grades are computed. Use this to be able to show instructor when the last computed grades were done. """ class Meta(object): # pylint: disable=missing-docstring ordering = ["-created"] get_latest_by = "created" course_id = CourseKeyField(max_length=255, db_index=True) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) seconds = models.IntegerField(default=0) # seconds elapsed for computation nstudents = models.IntegerField(default=0) def __unicode__(self): return "[OCGLog] %s: %s" % (self.course_id.to_deprecated_string(), self.created) # pylint: disable=no-member class StudentFieldOverride(TimeStampedModel): """ Holds the value of a specific field overriden for a student. This is used by the code in the `courseware.student_field_overrides` module to provide overrides of xblock fields on a per user basis. """ course_id = CourseKeyField(max_length=255, db_index=True) location = LocationKeyField(max_length=255, db_index=True) student = models.ForeignKey(User, db_index=True) class Meta(object): # pylint: disable=missing-docstring unique_together = (('course_id', 'field', 'location', 'student'),) field = models.CharField(max_length=255) value = models.TextField(default='null') # Signal that indicates that a user's score for a problem has been updated. # This signal is generated when a scoring event occurs either within the core # platform or in the Submissions module. 
Note that this signal will be triggered # regardless of the new and previous values of the score (i.e. it may be the # case that this signal is generated when a user re-attempts a problem but # receives the same score). SCORE_CHANGED = Signal( providing_args=[ 'points_possible', # Maximum score available for the exercise 'points_earned', # Score obtained by the user 'user_id', # Integer User ID 'course_id', # Unicode string representing the course 'usage_id' # Unicode string indicating the courseware instance ] ) @receiver(score_set) def submissions_score_set_handler(sender, **kwargs): # pylint: disable=unused-argument """ Consume the score_set signal defined in the Submissions API, and convert it to a SCORE_CHANGED signal defined in this module. Converts the unicode keys for user, course and item into the standard representation for the SCORE_CHANGED signal. This method expects that the kwargs dictionary will contain the following entries (See the definition of score_set): - 'points_possible': integer, - 'points_earned': integer, - 'anonymous_user_id': unicode, - 'course_id': unicode, - 'item_id': unicode """ points_possible = kwargs.get('points_possible', None) points_earned = kwargs.get('points_earned', None) course_id = kwargs.get('course_id', None) usage_id = kwargs.get('item_id', None) user = None if 'anonymous_user_id' in kwargs: user = user_by_anonymous_id(kwargs.get('anonymous_user_id')) # If any of the kwargs were missing, at least one of the following values # will be None. if all((user, points_possible, points_earned, course_id, usage_id)): SCORE_CHANGED.send( sender=None, points_possible=points_possible, points_earned=points_earned, user_id=user.id, course_id=course_id, usage_id=usage_id ) else: log.exception( u"Failed to process score_set signal from Submissions API. " "points_possible: %s, points_earned: %s, user: %s, course_id: %s, " "usage_id: %s", points_possible, points_earned, user, course_id, usage_id ) @receiver(score_reset) def submissions_score_reset_handler(sender, **kwargs): # pylint: disable=unused-argument """ Consume the score_reset signal defined in the Submissions API, and convert it to a SCORE_CHANGED signal indicating that the score has been set to 0/0. Converts the unicode keys for user, course and item into the standard representation for the SCORE_CHANGED signal. This method expects that the kwargs dictionary will contain the following entries (See the definition of score_reset): - 'anonymous_user_id': unicode, - 'course_id': unicode, - 'item_id': unicode """ course_id = kwargs.get('course_id', None) usage_id = kwargs.get('item_id', None) user = None if 'anonymous_user_id' in kwargs: user = user_by_anonymous_id(kwargs.get('anonymous_user_id')) # If any of the kwargs were missing, at least one of the following values # will be None. if all((user, course_id, usage_id)): SCORE_CHANGED.send( sender=None, points_possible=0, points_earned=0, user_id=user.id, course_id=course_id, usage_id=usage_id ) else: log.exception( u"Failed to process score_reset signal from Submissions API. " "user: %s, course_id: %s, usage_id: %s", user, course_id, usage_id )
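# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The SCORE_CHANGED signal defined above is the integration point for anything that
# needs to react to score updates. The receiver below is a hypothetical example only:
# its name and logging behaviour are assumptions, not edx-platform code. It relies
# solely on names already imported in this module (receiver, SCORE_CHANGED, log).

@receiver(SCORE_CHANGED)
def _example_score_changed_listener(sender, **kwargs):  # pylint: disable=unused-argument
    """Log every score update routed through SCORE_CHANGED (illustrative only)."""
    log.info(
        u"score changed: user_id=%s course_id=%s usage_id=%s earned=%s/%s",
        kwargs.get('user_id'),
        kwargs.get('course_id'),
        kwargs.get('usage_id'),
        kwargs.get('points_earned'),
        kwargs.get('points_possible'),
    )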
import anymarkup from bs4 import BeautifulSoup from collections import defaultdict from functools import cmp_to_key import logging from lxml import etree from operator import itemgetter from pip.req.req_file import parse_requirements import re from requests import get from xmlrpc.client import ServerProxy from semantic_version import Version as semver_version from subprocess import check_output from tempfile import NamedTemporaryFile from urllib.parse import urljoin from f8a_worker.enums import EcosystemBackend from f8a_worker.models import Analysis, Ecosystem, Package, Version from f8a_worker.utils import cwd, tempdir, TimedCommand from f8a_worker.process import Git logger = logging.getLogger(__name__) class SolverException(Exception): pass class Tokens(object): """ Comparison token representation """ operators = ['>=', '<=', '==', '>', '<', '=', '!='] (GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators)) def compare_version(a, b): """ Compare two version strings :param a: str :param b: str :return: -1 / 0 / 1 """ def _range(q): """ Convert a version string to array of integers: "1.2.3" -> [1, 2, 3] :param q: str :return: List[int] """ r = [] for n in q.replace('-', '.').split('.'): try: r.append(int(n)) except ValueError: # sort rc*, alpha, beta etc. lower than their non-annotated counterparts r.append(-1) return r def _append_zeros(x, num_zeros): """ Append `num_zeros` zeros to a copy of `x` and return it :param x: List[int] :param num_zeros: int :return: List[int] """ nx = list(x) for _ in range(num_zeros): nx.append(0) return nx def _cardinal(x, y): """ Make both input lists be of same cardinality :param x: List[int] :param y: List[int] :return: List[int] """ lx, ly = len(x), len(y) if lx == ly: return x, y elif lx > ly: return x, _append_zeros(y, lx - ly) else: return _append_zeros(x, ly - lx), y left, right = _cardinal(_range(a), _range(b)) return (left > right) - (left < right) class ReleasesFetcher(object): def __init__(self, ecosystem): self._ecosystem = ecosystem @property def ecosystem(self): return self._ecosystem def fetch_releases(self, package): return None, None class PypiReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super(PypiReleasesFetcher, self).__init__(ecosystem) self._rpc = ServerProxy(self.ecosystem.fetch_url) def _search_package_name(self, package): """ Case insensitive search :param package: str, Name of the package :return: """ def find_pypi_pkg(package): packages = self._rpc.search({'name': package}) if packages: exact_match = [p['name'] for p in packages if p['name'].lower() == package.lower()] if exact_match: return exact_match.pop() res = find_pypi_pkg(package) if res is None and '-' in package: # this is soooo annoying; you can `pip3 install argon2-cffi and it installs # argon2_cffi (underscore instead of dash), but searching through XMLRPC # API doesn't find it... 
so we try to search for underscore variant # if the dash variant isn't found res = find_pypi_pkg(package.replace('-', '_')) if res: return res raise ValueError("Package {} not found".format(package)) def fetch_releases(self, package): """ XML-RPC API Documentation: https://wiki.python.org/moin/PyPIXmlRpc Signature: package_releases(package_name, show_hidden=False) """ if not package: raise ValueError("package") releases = self._rpc.package_releases(package, True) if not releases: # try again with swapped case of first character releases = self._rpc.package_releases(package[0].swapcase() + package[1:], True) if not releases: # if nothing was found then do case-insensitive search return self.fetch_releases(self._search_package_name(package)) return package.lower(), releases class NpmReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super(NpmReleasesFetcher, self).__init__(ecosystem) def fetch_releases(self, package): """ Example output from the NPM endpoint: { ... versions: { "0.1.0": {}, "0.1.2": {} ... } } """ if not package: raise ValueError("package") r = get(self.ecosystem.fetch_url + package) if r.status_code == 404: if package.lower() != package: return self.fetch_releases(package.lower()) raise ValueError("Package {} not found".format(package)) if 'versions' not in r.json().keys(): raise ValueError("Package {} does not have associated versions".format(package)) return package, list(r.json()['versions'].keys()) class RubyGemsReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super(RubyGemsReleasesFetcher, self).__init__(ecosystem) def _search_package_name(self, package): url = '{url}/search.json?query={pkg}'.format(url=self.ecosystem.fetch_url, pkg=package) r = get(url) if r.status_code == 200: exact_match = [p['name'] for p in r.json() if p['name'].lower() == package.lower()] if exact_match: return exact_match.pop() raise ValueError("Package {} not found".format(package)) def fetch_releases(self, package): """ Example output from the RubyGems endpoint [ { "number": "1.0.0", ... }, { "number": "2.0.0", ... } ... 
] """ if not package: raise ValueError("package") url = '{url}/versions/{pkg}.json'.format(url=self.ecosystem.fetch_url, pkg=package) r = get(url) if r.status_code == 404: return self.fetch_releases(self._search_package_name(package)) return package, [ver['number'] for ver in r.json()] class NugetReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super(NugetReleasesFetcher, self).__init__(ecosystem) @staticmethod def scrape_versions_from_nuget_org(package, sort_by_downloads=False): """ Scrape 'Version History' from https://www.nuget.org/packages/<package> """ releases = [] nuget_packages_url = 'https://www.nuget.org/packages/' page = get(nuget_packages_url + package) page = BeautifulSoup(page.text, 'html.parser') version_history = page.find(class_="version-history") for version in version_history.find_all(href=re.compile('/packages/')): version_text = version.text.replace('(current version)', '').strip() try: semver_version.coerce(version_text) downloads = int(version.find_next('td').text.strip().replace(',', '')) except ValueError: pass else: releases.append((version_text, downloads)) if sort_by_downloads: releases.sort(key=itemgetter(1)) return package, [p[0] for p in reversed(releases)] def fetch_releases(self, package): if not package: raise ValueError("package not specified") # There's an API interface which lists available releases at # https://api.nuget.org/v3-flatcontainer/{package}/index.json # But it lists also unlisted/deprecated/shouldn't-be-used versions, # so we don't use it. return self.scrape_versions_from_nuget_org(package) class MavenReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super().__init__(ecosystem) @staticmethod def releases_from_maven_org(url): releases = [] page = BeautifulSoup(get(url).text, 'html.parser') for link in page.find_all('a'): if link.text.endswith('/') and link.text != '../': releases.append(link.text.rstrip('/')) return releases def fetch_releases(self, package): if not package: raise ValueError("package not specified") try: group_id, artifact_id = package.split(':') except ValueError as exc: raise ValueError("Invalid Maven coordinates: {a}".format(a=package)) from exc maven_url = "http://repo1.maven.org/maven2/" dir_path = "{g}/{a}/".format(g=group_id.replace('.', '/'), a=artifact_id) url = urljoin(maven_url, dir_path) return package, self.releases_from_maven_org(url) class GolangReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem): super(GolangReleasesFetcher, self).__init__(ecosystem) def fetch_releases(self, package): if not package: raise ValueError('package not specified') parts = package.split("/")[:3] if len(parts) == 3: # this assumes github.com/org/project like structure host, org, proj = parts repo_url = 'git://{host}/{org}/{proj}.git'.format(host=host, org=org, proj=proj) elif len(parts) == 2 and parts[0] == 'gopkg.in': # specific to gopkg.in/packages host, proj = parts repo_url = 'https://{host}/{proj}.git'.format(host=host, proj=proj) else: raise ValueError("Package {} is invalid git repository".format(package)) output = Git.ls_remote(repo_url, args=['-q'], refs=['HEAD']) version, ref = output[0].split() if not version: raise ValueError("Package {} does not have associated versions".format(package)) return package, [version] class F8aReleasesFetcher(ReleasesFetcher): def __init__(self, ecosystem, database): super(F8aReleasesFetcher, self).__init__(ecosystem) self.database = database def fetch_releases(self, package): """ Fetch analysed versions for specific ecosystem + package from f8a """ 
query = self.database.query(Version).\ join(Analysis).join(Package).join(Ecosystem).\ filter(Package.name == package, Ecosystem.name == self.ecosystem.name, Analysis.finished_at.isnot(None)) versions = {v.identifier for v in query} return package, list(sorted(versions, key=cmp_to_key(compare_version))) class Dependency(object): def __init__(self, name, spec): self._name = name # spec is a list where each item is either 2-tuple (operator, version) or list of these # example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means: # (>=0.6.0 and <0.7.0) or >1.0.0 self._spec = spec @property def name(self): return self._name @property def spec(self): return self._spec def __contains__(self, item): return self.check(item) def __repr__(self): return "{} {}".format(self.name, self.spec) def __eq__(self, other): return self.name == other.name and self.spec == other.spec def check(self, version): """ Check if `version` fits into our dependency specification :param version: str :return: bool """ def _compare_spec(spec): if len(spec) == 1: spec = ('=', spec[0]) token = Tokens.operators.index(spec[0]) comparison = compare_version(version, spec[1]) if token in [Tokens.EQ1, Tokens.EQ2]: return comparison == 0 elif token == Tokens.GT: return comparison == 1 elif token == Tokens.LT: return comparison == -1 elif token == Tokens.GTE: return comparison >= 0 elif token == Tokens.LTE: return comparison <= 0 elif token == Tokens.NEQ: return comparison != 0 else: raise ValueError('Invalid comparison token') results, intermediaries = False, False for spec in self.spec: if isinstance(spec, list): intermediary = True for sub in spec: intermediary &= _compare_spec(sub) intermediaries |= intermediary elif isinstance(spec, tuple): results |= _compare_spec(spec) return results or intermediaries class DependencyParser(object): def parse(self, specs): pass @staticmethod def compose_sep(deps, separator): """ Opposite of parse() :param deps: list of Dependency() :return: dict of {name: version spec} """ result = {} for dep in deps: if dep.name not in result: result[dep.name] = separator.join([op + ver for op, ver in dep.spec]) else: result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec]) return result class PypiDependencyParser(DependencyParser): @staticmethod def _parse_python(spec): """ Parse PyPI specification of a single dependency :param spec: str, for example "Django>=1.5,<1.8" :return: [Django [[('>=', '1.5'), ('<', '1.8')]]] """ def _extract_op_version(spec): # https://www.python.org/dev/peps/pep-0440/#compatible-release if spec.operator == '~=': version = spec.version.split('.') if len(version) in {2, 3, 4}: if len(version) in {3, 4}: del version[-1] # will increase the last but one in next line version[-1] = str(int(version[-1]) + 1) else: raise ValueError('%r must not be used with %r' % (spec.operator, spec.version)) return [('>=', spec.version), ('<', '.'.join(version))] # Trailing .* is permitted per # https://www.python.org/dev/peps/pep-0440/#version-matching elif spec.operator == '==' and spec.version.endswith('.*'): try: result = check_output(['/usr/bin/semver-ranger', spec.version], universal_newlines=True).strip() gte, lt = result.split() return [('>=', gte.lstrip('>=')), ('<', lt.lstrip('<'))] except ValueError: logger.info("couldn't resolve ==%s", spec.version) return spec.operator, spec.version # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality # Use of this operator is heavily discouraged, so just convert it to 'Version matching' elif spec.operator == '===': 
return '==', spec.version else: return spec.operator, spec.version def _get_pip_spec(requirements): '''In Pip 8+ there's no `specs` field and we have to dig the information from the `specifier` field''' if hasattr(requirements, 'specs'): return requirements.specs elif hasattr(requirements, 'specifier'): specs = [_extract_op_version(spec) for spec in requirements.specifier] if len(specs) == 0: specs = [('>=', '0.0.0')] elif len(specs) > 1: specs = [specs] return specs # create a temporary file and store the spec there since # `parse_requirements` requires a file with NamedTemporaryFile(mode='w+', suffix='pysolve') as f: f.write(spec) f.flush() parsed = parse_requirements(f.name, session=f.name) dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop() return dependency def parse(self, specs): return [self._parse_python(s) for s in specs] @staticmethod def compose(deps): return DependencyParser.compose_sep(deps, ',') @staticmethod def restrict_versions(deps): return deps # TODO class NpmDependencyParser(DependencyParser): @staticmethod def _parse_npm_tokens(spec): for token in Tokens.operators: if token in spec: return token, spec.split(token)[1] return spec, def _parse_npm(self, name, spec): """ Parse NPM specification of a single dependency :param name: str :param spec: str :return: Dependency """ specs = check_output(['/usr/bin/semver-ranger', spec], universal_newlines=True).strip() if specs == 'null': logger.info("invalid version specification for %s = %s", name, spec) return None ret = [] for s in specs.split('||'): if ' ' in s: spaced = s.split(' ') assert len(spaced) == 2 left, right = spaced ret.append([self._parse_npm_tokens(left), self._parse_npm_tokens(right)]) elif s == '*': ret.append(('>=', '0.0.0')) else: ret.append(self._parse_npm_tokens(s)) return Dependency(name, ret) def parse(self, specs): deps = [] for spec in specs: name, ver = spec.split(' ', 1) parsed = self._parse_npm(name, ver) if parsed: deps.append(parsed) return deps @staticmethod def compose(deps): return DependencyParser.compose_sep(deps, ' ') @staticmethod def restrict_versions(deps): """ From list of semver ranges select only the most restricting ones for each operator. :param deps: list of Dependency(), example: [node [('>=', '0.6.0')], node [('<', '1.0.0')], node [('>=', '0.8.0')]] :return: list of Dependency() with only the most restrictive versions, example: [node [('<', '1.0.0')], node [('>=', '0.8.0')]] """ # list to dict # { # 'node' : { # '>=': ['0.8.0', '0.6.0'], # '<': ['1.0.0'] # } # } dps_dict = defaultdict(dict) for dp in deps: if dp.name not in dps_dict: dps_dict[dp.name] = defaultdict(list) for spec in dp.spec: if len(spec) != 2: continue operator, version = spec dps_dict[dp.name][operator].append(version) # select only the most restrictive versions result = [] for name, version_spec_dict in dps_dict.items(): specs = [] for operator, versions in version_spec_dict.items(): if operator in ['>', '>=']: # select highest version version = sorted(versions, key=cmp_to_key(compare_version))[-1] elif operator in ['<', '<=']: # select lowest version version = sorted(versions, key=cmp_to_key(compare_version))[0] specs.append((operator, version)) # dict back to list result.append(Dependency(name, specs)) return result RubyGemsDependencyParser = NpmDependencyParser class OSSIndexDependencyParser(NpmDependencyParser): def _parse_npm(self, name, spec): """ Parse OSS Index version specification. It's similar to NPM semver, with few tweaks. 
""" # sometimes there's '|' instead of '||', but the meaning seems to be the same spec = spec.replace(' | ', ' || ') # remove superfluous brackets spec = spec.replace('(', '').replace(')', '') return super()._parse_npm(name, spec) class NugetDependencyParser(object): # https://docs.microsoft.com/en-us/nuget/create-packages/dependency-versions#version-ranges def parse(self, specs): deps = [] for spec in specs: name, version_range = spec.split(' ', 1) # 1.0 -> 1.0≤x if re.search('[,()\[\]]', version_range) is None: dep = Dependency(name, [('>=', version_range)]) # [1.0,2.0] -> 1.0≤x≤2.0 elif re.fullmatch(r'\[(.+),(.+)\]', version_range): m = re.fullmatch(r'\[(.+),(.+)\]', version_range) dep = Dependency(name, [[('>=', m.group(1)), ('<=', m.group(2))]]) # (1.0,2.0) -> 1.0<x<2.0 elif re.fullmatch(r'\((.+),(.+)\)', version_range): m = re.fullmatch(r'\((.+),(.+)\)', version_range) dep = Dependency(name, [[('>', m.group(1)), ('<', m.group(2))]]) # The following one is not in specification, # so we can just guess what was the intention. # Seen in NLog:5.0.0-beta08 dependencies # [1.0, ) -> 1.0≤x elif re.fullmatch(r'\[(.+), \)', version_range): m = re.fullmatch(r'\[(.+), \)', version_range) dep = Dependency(name, [('>=', m.group(1))]) # [1.0,2.0) -> 1.0≤x<2.0 elif re.fullmatch(r'\[(.+),(.+)\)', version_range): m = re.fullmatch(r'\[(.+),(.+)\)', version_range) dep = Dependency(name, [[('>=', m.group(1)), ('<', m.group(2))]]) # (1.0,) -> 1.0<x elif re.fullmatch(r'\((.+),\)', version_range): m = re.fullmatch(r'\((.+),\)', version_range) dep = Dependency(name, [('>', m.group(1))]) # [1.0] -> x==1.0 elif re.fullmatch(r'\[(.+)\]', version_range): m = re.fullmatch(r'\[(.+)\]', version_range) dep = Dependency(name, [('==', m.group(1))]) # (,1.0] -> x≤1.0 elif re.fullmatch(r'\(,(.+)\]', version_range): m = re.fullmatch(r'\(,(.+)\]', version_range) dep = Dependency(name, [('<=', m.group(1))]) # (,1.0) -> x<1.0 elif re.fullmatch(r'\(,(.+)\)', version_range): m = re.fullmatch(r'\(,(.+)\)', version_range) dep = Dependency(name, [('<', m.group(1))]) elif re.fullmatch(r'\((.+)\)', version_range): raise ValueError("invalid version range %r" % version_range) deps.append(dep) return deps class NoOpDependencyParser(DependencyParser): """ Dummy dependency parser for ecosystems that don't support version ranges. """ def parse(self, specs): return [Dependency(*x.split(' ')) for x in specs] @staticmethod def compose(deps): return DependencyParser.compose_sep(deps, ' ') @staticmethod def restrict_versions(deps): return deps class GolangDependencyParser(DependencyParser): """ Dependency parser for Golang. 
""" def parse(self, specs): dependencies = [] for spec in specs: spec_list = spec.split(' ') if len(spec_list) > 1: dependencies.append(Dependency(spec_list[0], spec_list[1])) else: dependencies.append(Dependency(spec_list[0], '')) return dependencies @staticmethod def compose(deps): return DependencyParser.compose_sep(deps, ' ') @staticmethod def restrict_versions(deps): return deps class Solver(object): def __init__(self, ecosystem, dep_parser=None, fetcher=None, highest_dependency_version=True): self.ecosystem = ecosystem self._dependency_parser = dep_parser self._release_fetcher = fetcher self._highest_dependency_version = highest_dependency_version @property def dependency_parser(self): return self._dependency_parser @property def release_fetcher(self): return self._release_fetcher def solve(self, dependencies, graceful=True, all_versions=False): """ Solve `dependencies` against upstream repository :param dependencies: List, List of dependencies in native format :param graceful: bool, Print info output to stdout :param all_versions: bool, Return all matched versions instead of the latest :return: Dict[str, str], Matched versions """ solved = {} for dep in self.dependency_parser.parse(dependencies): logger.debug("Fetching releases for: {}".format(dep)) name, releases = self.release_fetcher.fetch_releases(dep.name) if name in solved: raise SolverException("Dependency: {} is listed multiple times".format(name)) if not releases: if graceful: logger.info("No releases found for: %s", dep.name) else: raise SolverException("No releases found for: {}".format(dep.name)) matching = sorted([release for release in releases if release in dep], key=cmp_to_key(compare_version)) logger.debug(" matching:\n {}".format(matching)) if all_versions: solved[name] = matching else: if not matching: solved[name] = None else: if self._highest_dependency_version: solved[name] = matching[-1] else: solved[name] = matching[0] return solved class PypiSolver(Solver): def __init__(self, ecosystem, parser=None, fetcher=None): super(PypiSolver, self).__init__(ecosystem, parser or PypiDependencyParser(), fetcher or PypiReleasesFetcher(ecosystem)) class NpmSolver(Solver): def __init__(self, ecosystem, parser=None, fetcher=None): super(NpmSolver, self).__init__(ecosystem, parser or NpmDependencyParser(), fetcher or NpmReleasesFetcher(ecosystem)) class RubyGemsSolver(Solver): def __init__(self, ecosystem, parser=None, fetcher=None): super(RubyGemsSolver, self).__init__(ecosystem, parser or RubyGemsDependencyParser(), fetcher or RubyGemsReleasesFetcher(ecosystem)) class NugetSolver(Solver): # https://docs.microsoft.com/en-us/nuget/release-notes/nuget-2.8#-dependencyversion-switch def __init__(self, ecosystem, parser=None, fetcher=None): super(NugetSolver, self).__init__(ecosystem, parser or NugetDependencyParser(), fetcher or NugetReleasesFetcher(ecosystem), highest_dependency_version=False) class MavenManualSolver(Solver): """ If you need to resolve all versions or use specific DependencyParser. Otherwise use MavenSolver (below). 
""" def __init__(self, ecosystem, parser, fetcher=None): super().__init__(ecosystem, parser, fetcher or MavenReleasesFetcher(ecosystem)) class GolangSolver(Solver): def __init__(self, ecosystem, parser=None, fetcher=None): super(GolangSolver, self).__init__(ecosystem, parser or GolangDependencyParser(), fetcher or GolangReleasesFetcher(ecosystem)) def solve(self, dependencies): result = {} for dependency in self.dependency_parser.parse(dependencies): if dependency.spec: result[dependency.name] = dependency.spec else: version = self.release_fetcher.fetch_releases(dependency.name)[1][0] result[dependency.name] = version return result class MavenSolver(object): """ Doesn't inherit from Solver, because we don't use its solve(). We also don't need a DependencyParser nor a ReleasesFetcher for Maven. 'mvn versions:resolve-ranges' does all the dirty work for us. Resolves only to one version, so if you need solve(all_versions=True), use MavenManualSolver """ @staticmethod def _generate_pom_xml(to_solve): """ Create pom.xml with dependencies from to_solve and run 'mvn versions:resolve-ranges', which resolves the version ranges (overwrites the pom.xml). :param to_solve: {"groupId:artifactId": "version-range"} """ project = etree.Element('project') etree.SubElement(project, 'modelVersion').text = '4.0.0' etree.SubElement(project, 'groupId').text = 'foo.bar.baz' etree.SubElement(project, 'artifactId').text = 'testing' etree.SubElement(project, 'version').text = '1.0.0' dependencies = etree.SubElement(project, 'dependencies') for name, version_range in to_solve.items(): group_id, artifact_id = name.rstrip(':').split(':') dependency = etree.SubElement(dependencies, 'dependency') etree.SubElement(dependency, 'groupId').text = group_id etree.SubElement(dependency, 'artifactId').text = artifact_id etree.SubElement(dependency, 'version').text = version_range with open('pom.xml', 'wb') as pom: pom.write(etree.tostring(project, xml_declaration=True, pretty_print=True)) TimedCommand.get_command_output(['mvn', 'versions:resolve-ranges'], graceful=False) @staticmethod def _dependencies_from_pom_xml(): """ Extract dependencies from pom.xml in current directory :return: {"groupId:artifactId": "version"} """ solved = {} with open('pom.xml') as r: pom_dict = anymarkup.parse(r.read()) dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', []) if not isinstance(dependencies, list): dependencies = [dependencies] for dependency in dependencies: name = "{}:{}".format(dependency['groupId'], dependency['artifactId']) solved[name] = dependency['version'] return solved @staticmethod def _resolve_versions(to_solve): """ Resolve version ranges in to_solve :param to_solve: {"groupId:artifactId": "version-range"} :return: {"groupId:artifactId": "version"} """ if not to_solve: return {} with tempdir() as tmpdir: with cwd(tmpdir): MavenSolver._generate_pom_xml(to_solve) return MavenSolver._dependencies_from_pom_xml() @staticmethod def is_version_range(ver_spec): # http://maven.apache.org/enforcer/enforcer-rules/versionRanges.html return re.search('[,()\[\]]', ver_spec) is not None def solve(self, dependencies): already_solved = {} to_solve = {} for dependency in dependencies: name, ver_spec = dependency.split(' ', 1) if not self.is_version_range(ver_spec): already_solved[name] = ver_spec else: to_solve[name] = ver_spec result = already_solved.copy() result.update(self._resolve_versions(to_solve)) return result def get_ecosystem_solver(ecosystem, with_parser=None, with_fetcher=None): """ Get `Solver` 
instance for particular ecosystem :param ecosystem: Ecosystem :param with_parser: DependencyParser instance :param with_fetcher: ReleasesFetcher instance :return: Solver """ if ecosystem.is_backed_by(EcosystemBackend.maven): if with_parser is None: return MavenSolver() else: return MavenManualSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.npm): return NpmSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.pypi): return PypiSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.rubygems): return RubyGemsSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.nuget): return NugetSolver(ecosystem, with_parser, with_fetcher) elif ecosystem.is_backed_by(EcosystemBackend.scm): return GolangSolver(ecosystem, with_parser, with_fetcher) raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name)) def get_ecosystem_parser(ecosystem): if ecosystem.is_backed_by(EcosystemBackend.maven): return NoOpDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.npm): return NpmDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.pypi): return PypiDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.rubygems): return RubyGemsDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.nuget): return NugetDependencyParser() elif ecosystem.is_backed_by(EcosystemBackend.scm): return GolangDependencyParser() raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
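# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, self-contained demonstration of the version primitives defined above:
# compare_version() and Dependency.check(). The package name and version numbers are
# made up for illustration; nothing here touches a real ecosystem backend.

if __name__ == '__main__':
    # compare_version() compares numerically, component by component, and sorts
    # pre-release suffixes (rc, alpha, beta, ...) below their plain counterparts.
    assert compare_version('1.2.3', '1.2.10') == -1
    assert compare_version('1.0.0-rc1', '1.0.0') == -1
    assert compare_version('2.0', '2.0.0') == 0

    # A Dependency spec is a list of OR-ed alternatives; a nested list means AND.
    # This one reads: (>=0.6.0 and <0.7.0) or >1.0.0
    dep = Dependency('example-package', [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')])
    assert '0.6.5' in dep       # matches the first (AND-ed) range
    assert '1.2.0' in dep       # matches the second alternative
    assert '0.8.0' not in dep   # falls between the two ranges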
" join(Analysis).join(Package).join(Ecosystem).\\\n", " filter(Package.name == package,\n", " Ecosystem.name == self.ecosystem.name,\n", " Analysis.finished_at.isnot(None))\n", " versions = {v.identifier for v in query}\n", " return package, list(sorted(versions, key=cmp_to_key(compare_version)))\n", "\n", "\n", "class Dependency(object):\n", " def __init__(self, name, spec):\n", " self._name = name\n", " # spec is a list where each item is either 2-tuple (operator, version) or list of these\n", " # example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means:\n", " # (>=0.6.0 and <0.7.0) or >1.0.0\n", " self._spec = spec\n", "\n", " @property\n", " def name(self):\n", " return self._name\n", "\n", " @property\n", " def spec(self):\n", " return self._spec\n", "\n", " def __contains__(self, item):\n", " return self.check(item)\n", "\n", " def __repr__(self):\n", " return \"{} {}\".format(self.name, self.spec)\n", "\n", " def __eq__(self, other):\n", " return self.name == other.name and self.spec == other.spec\n", "\n", " def check(self, version):\n", " \"\"\"\n", " Check if `version` fits into our dependency specification\n", "\n", " :param version: str\n", " :return: bool\n", " \"\"\"\n", " def _compare_spec(spec):\n", " if len(spec) == 1:\n", " spec = ('=', spec[0])\n", "\n", " token = Tokens.operators.index(spec[0])\n", " comparison = compare_version(version, spec[1])\n", " if token in [Tokens.EQ1, Tokens.EQ2]:\n", " return comparison == 0\n", " elif token == Tokens.GT:\n", " return comparison == 1\n", " elif token == Tokens.LT:\n", " return comparison == -1\n", " elif token == Tokens.GTE:\n", " return comparison >= 0\n", " elif token == Tokens.LTE:\n", " return comparison <= 0\n", " elif token == Tokens.NEQ:\n", " return comparison != 0\n", " else:\n", " raise ValueError('Invalid comparison token')\n", "\n", " results, intermediaries = False, False\n", " for spec in self.spec:\n", " if isinstance(spec, list):\n", " intermediary = True\n", " for sub in spec:\n", " intermediary &= _compare_spec(sub)\n", " intermediaries |= intermediary\n", " elif isinstance(spec, tuple):\n", " results |= _compare_spec(spec)\n", "\n", " return results or intermediaries\n", "\n", "\n", "class DependencyParser(object):\n", " def parse(self, specs):\n", " pass\n", "\n", " @staticmethod\n", " def compose_sep(deps, separator):\n", " \"\"\"\n", " Opposite of parse()\n", " :param deps: list of Dependency()\n", " :return: dict of {name: version spec}\n", " \"\"\"\n", " result = {}\n", " for dep in deps:\n", " if dep.name not in result:\n", " result[dep.name] = separator.join([op + ver for op, ver in dep.spec])\n", " else:\n", " result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec])\n", " return result\n", "\n", "\n", "class PypiDependencyParser(DependencyParser):\n", " @staticmethod\n", " def _parse_python(spec):\n", " \"\"\"\n", " Parse PyPI specification of a single dependency\n", "\n", " :param spec: str, for example \"Django>=1.5,<1.8\"\n", " :return: [Django [[('>=', '1.5'), ('<', '1.8')]]]\n", " \"\"\"\n", "\n", " def _extract_op_version(spec):\n", " # https://www.python.org/dev/peps/pep-0440/#compatible-release\n", " if spec.operator == '~=':\n", " version = spec.version.split('.')\n", " if len(version) in {2, 3, 4}:\n", " if len(version) in {3, 4}:\n", " del version[-1] # will increase the last but one in next line\n", " version[-1] = str(int(version[-1]) + 1)\n", " else:\n", " raise ValueError('%r must not be used with %r' % (spec.operator, spec.version))\n", " return 
[('>=', spec.version), ('<', '.'.join(version))]\n", " # Trailing .* is permitted per\n", " # https://www.python.org/dev/peps/pep-0440/#version-matching\n", " elif spec.operator == '==' and spec.version.endswith('.*'):\n", " try:\n", " result = check_output(['/usr/bin/semver-ranger', spec.version],\n", " universal_newlines=True).strip()\n", " gte, lt = result.split()\n", " return [('>=', gte.lstrip('>=')), ('<', lt.lstrip('<'))]\n", " except ValueError:\n", " logger.info(\"couldn't resolve ==%s\", spec.version)\n", " return spec.operator, spec.version\n", " # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality\n", " # Use of this operator is heavily discouraged, so just convert it to 'Version matching'\n", " elif spec.operator == '===':\n", " return '==', spec.version\n", " else:\n", " return spec.operator, spec.version\n", "\n", " def _get_pip_spec(requirements):\n", " '''In Pip 8+ there's no `specs` field and we have to dig the\n", " information from the `specifier` field'''\n", " if hasattr(requirements, 'specs'):\n", " return requirements.specs\n", " elif hasattr(requirements, 'specifier'):\n", " specs = [_extract_op_version(spec) for spec in requirements.specifier]\n", " if len(specs) == 0:\n", " specs = [('>=', '0.0.0')]\n", " elif len(specs) > 1:\n", " specs = [specs]\n", " return specs\n", "\n", " # create a temporary file and store the spec there since\n", " # `parse_requirements` requires a file\n", " with NamedTemporaryFile(mode='w+', suffix='pysolve') as f:\n", " f.write(spec)\n", " f.flush()\n", " parsed = parse_requirements(f.name, session=f.name)\n", " dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()\n", "\n", " return dependency\n", "\n", " def parse(self, specs):\n", " return [self._parse_python(s) for s in specs]\n", "\n", " @staticmethod\n", " def compose(deps):\n", " return DependencyParser.compose_sep(deps, ',')\n", "\n", " @staticmethod\n", " def restrict_versions(deps):\n", " return deps # TODO\n", "\n", "\n", "class NpmDependencyParser(DependencyParser):\n", " @staticmethod\n", " def _parse_npm_tokens(spec):\n", " for token in Tokens.operators:\n", " if token in spec:\n", " return token, spec.split(token)[1]\n", " return spec,\n", "\n", " def _parse_npm(self, name, spec):\n", " \"\"\"\n", " Parse NPM specification of a single dependency\n", "\n", " :param name: str\n", " :param spec: str\n", " :return: Dependency\n", " \"\"\"\n", " specs = check_output(['/usr/bin/semver-ranger', spec], universal_newlines=True).strip()\n", " if specs == 'null':\n", " logger.info(\"invalid version specification for %s = %s\", name, spec)\n", " return None\n", "\n", " ret = []\n", " for s in specs.split('||'):\n", " if ' ' in s:\n", " spaced = s.split(' ')\n", " assert len(spaced) == 2\n", " left, right = spaced\n", " ret.append([self._parse_npm_tokens(left), self._parse_npm_tokens(right)])\n", " elif s == '*':\n", " ret.append(('>=', '0.0.0'))\n", " else:\n", " ret.append(self._parse_npm_tokens(s))\n", "\n", " return Dependency(name, ret)\n", "\n", " def parse(self, specs):\n", " deps = []\n", " for spec in specs:\n", " name, ver = spec.split(' ', 1)\n", " parsed = self._parse_npm(name, ver)\n", " if parsed:\n", " deps.append(parsed)\n", "\n", " return deps\n", "\n", " @staticmethod\n", " def compose(deps):\n", " return DependencyParser.compose_sep(deps, ' ')\n", "\n", " @staticmethod\n", " def restrict_versions(deps):\n", " \"\"\"\n", " From list of semver ranges select only the most restricting ones for each operator.\n", "\n", " :param deps: 
list of Dependency(), example:\n", " [node [('>=', '0.6.0')], node [('<', '1.0.0')], node [('>=', '0.8.0')]]\n", " :return: list of Dependency() with only the most restrictive versions, example:\n", " [node [('<', '1.0.0')], node [('>=', '0.8.0')]]\n", " \"\"\"\n", " # list to dict\n", " # {\n", " # 'node' : {\n", " # '>=': ['0.8.0', '0.6.0'],\n", " # '<': ['1.0.0']\n", " # }\n", " # }\n", " dps_dict = defaultdict(dict)\n", " for dp in deps:\n", " if dp.name not in dps_dict:\n", " dps_dict[dp.name] = defaultdict(list)\n", " for spec in dp.spec:\n", " if len(spec) != 2:\n", " continue\n", " operator, version = spec\n", " dps_dict[dp.name][operator].append(version)\n", "\n", " # select only the most restrictive versions\n", " result = []\n", " for name, version_spec_dict in dps_dict.items():\n", " specs = []\n", " for operator, versions in version_spec_dict.items():\n", " if operator in ['>', '>=']: # select highest version\n", " version = sorted(versions, key=cmp_to_key(compare_version))[-1]\n", " elif operator in ['<', '<=']: # select lowest version\n", " version = sorted(versions, key=cmp_to_key(compare_version))[0]\n", " specs.append((operator, version))\n", " # dict back to list\n", " result.append(Dependency(name, specs))\n", "\n", " return result\n", "\n", "\n", "RubyGemsDependencyParser = NpmDependencyParser\n", "\n", "\n", "class OSSIndexDependencyParser(NpmDependencyParser):\n", " def _parse_npm(self, name, spec):\n", " \"\"\" Parse OSS Index version specification.\n", " It's similar to NPM semver, with few tweaks. \"\"\"\n", " # sometimes there's '|' instead of '||', but the meaning seems to be the same\n", " spec = spec.replace(' | ', ' || ')\n", " # remove superfluous brackets\n", " spec = spec.replace('(', '').replace(')', '')\n", " return super()._parse_npm(name, spec)\n", "\n", "\n", "class NugetDependencyParser(object):\n", " # https://docs.microsoft.com/en-us/nuget/create-packages/dependency-versions#version-ranges\n", " def parse(self, specs):\n", " deps = []\n", " for spec in specs:\n", " name, version_range = spec.split(' ', 1)\n", "\n", " # 1.0 -> 1.0≤x\n", " if re.search('[,()\\[\\]]', version_range) is None:\n", " dep = Dependency(name, [('>=', version_range)])\n", " # [1.0,2.0] -> 1.0≤x≤2.0\n", " elif re.fullmatch(r'\\[(.+),(.+)\\]', version_range):\n", " m = re.fullmatch(r'\\[(.+),(.+)\\]', version_range)\n", " dep = Dependency(name, [[('>=', m.group(1)), ('<=', m.group(2))]])\n", " # (1.0,2.0) -> 1.0<x<2.0\n", " elif re.fullmatch(r'\\((.+),(.+)\\)', version_range):\n", " m = re.fullmatch(r'\\((.+),(.+)\\)', version_range)\n", " dep = Dependency(name, [[('>', m.group(1)), ('<', m.group(2))]])\n", " # The following one is not in specification,\n", " # so we can just guess what was the intention.\n", " # Seen in NLog:5.0.0-beta08 dependencies\n", " # [1.0, ) -> 1.0≤x\n", " elif re.fullmatch(r'\\[(.+), \\)', version_range):\n", " m = re.fullmatch(r'\\[(.+), \\)', version_range)\n", " dep = Dependency(name, [('>=', m.group(1))])\n", " # [1.0,2.0) -> 1.0≤x<2.0\n", " elif re.fullmatch(r'\\[(.+),(.+)\\)', version_range):\n", " m = re.fullmatch(r'\\[(.+),(.+)\\)', version_range)\n", " dep = Dependency(name, [[('>=', m.group(1)), ('<', m.group(2))]])\n", " # (1.0,) -> 1.0<x\n", " elif re.fullmatch(r'\\((.+),\\)', version_range):\n", " m = re.fullmatch(r'\\((.+),\\)', version_range)\n", " dep = Dependency(name, [('>', m.group(1))])\n", " # [1.0] -> x==1.0\n", " elif re.fullmatch(r'\\[(.+)\\]', version_range):\n", " m = re.fullmatch(r'\\[(.+)\\]', version_range)\n", " dep = 
Dependency(name, [('==', m.group(1))])\n", " # (,1.0] -> x≤1.0\n", " elif re.fullmatch(r'\\(,(.+)\\]', version_range):\n", " m = re.fullmatch(r'\\(,(.+)\\]', version_range)\n", " dep = Dependency(name, [('<=', m.group(1))])\n", " # (,1.0) -> x<1.0\n", " elif re.fullmatch(r'\\(,(.+)\\)', version_range):\n", " m = re.fullmatch(r'\\(,(.+)\\)', version_range)\n", " dep = Dependency(name, [('<', m.group(1))])\n", " elif re.fullmatch(r'\\((.+)\\)', version_range):\n", " raise ValueError(\"invalid version range %r\" % version_range)\n", "\n", " deps.append(dep)\n", "\n", " return deps\n", "\n", "\n", "class NoOpDependencyParser(DependencyParser):\n", " \"\"\"\n", " Dummy dependency parser for ecosystems that don't support version ranges.\n", " \"\"\"\n", " def parse(self, specs):\n", " return [Dependency(*x.split(' ')) for x in specs]\n", "\n", " @staticmethod\n", " def compose(deps):\n", " return DependencyParser.compose_sep(deps, ' ')\n", "\n", " @staticmethod\n", " def restrict_versions(deps):\n", " return deps\n", "\n", "\n", "class GolangDependencyParser(DependencyParser):\n", " \"\"\"\n", " Dependency parser for Golang.\n", " \"\"\"\n", " def parse(self, specs):\n", " dependencies = []\n", " for spec in specs:\n", " spec_list = spec.split(' ')\n", " if len(spec_list) > 1:\n", " dependencies.append(Dependency(spec_list[0], spec_list[1]))\n", " else:\n", " dependencies.append(Dependency(spec_list[0], ''))\n", " return dependencies\n", "\n", " @staticmethod\n", " def compose(deps):\n", " return DependencyParser.compose_sep(deps, ' ')\n", "\n", " @staticmethod\n", " def restrict_versions(deps):\n", " return deps\n", "\n", "\n", "class Solver(object):\n", " def __init__(self, ecosystem, dep_parser=None, fetcher=None, highest_dependency_version=True):\n", " self.ecosystem = ecosystem\n", " self._dependency_parser = dep_parser\n", " self._release_fetcher = fetcher\n", " self._highest_dependency_version = highest_dependency_version\n", "\n", " @property\n", " def dependency_parser(self):\n", " return self._dependency_parser\n", "\n", " @property\n", " def release_fetcher(self):\n", " return self._release_fetcher\n", "\n", " def solve(self, dependencies, graceful=True, all_versions=False):\n", " \"\"\"\n", " Solve `dependencies` against upstream repository\n", "\n", " :param dependencies: List, List of dependencies in native format\n", " :param graceful: bool, Print info output to stdout\n", " :param all_versions: bool, Return all matched versions instead of the latest\n", " :return: Dict[str, str], Matched versions\n", " \"\"\"\n", "\n", " solved = {}\n", " for dep in self.dependency_parser.parse(dependencies):\n", " logger.debug(\"Fetching releases for: {}\".format(dep))\n", "\n", " name, releases = self.release_fetcher.fetch_releases(dep.name)\n", "\n", " if name in solved:\n", " raise SolverException(\"Dependency: {} is listed multiple times\".format(name))\n", "\n", " if not releases:\n", " if graceful:\n", " logger.info(\"No releases found for: %s\", dep.name)\n", " else:\n", " raise SolverException(\"No releases found for: {}\".format(dep.name))\n", "\n", " matching = sorted([release\n", " for release in releases\n", " if release in dep], key=cmp_to_key(compare_version))\n", "\n", " logger.debug(\" matching:\\n {}\".format(matching))\n", "\n", " if all_versions:\n", " solved[name] = matching\n", " else:\n", " if not matching:\n", " solved[name] = None\n", " else:\n", " if self._highest_dependency_version:\n", " solved[name] = matching[-1]\n", " else:\n", " solved[name] = matching[0]\n", "\n", 
" return solved\n", "\n", "\n", "class PypiSolver(Solver):\n", " def __init__(self, ecosystem, parser=None, fetcher=None):\n", " super(PypiSolver, self).__init__(ecosystem,\n", " parser or PypiDependencyParser(),\n", " fetcher or PypiReleasesFetcher(ecosystem))\n", "\n", "\n", "class NpmSolver(Solver):\n", " def __init__(self, ecosystem, parser=None, fetcher=None):\n", " super(NpmSolver, self).__init__(ecosystem,\n", " parser or NpmDependencyParser(),\n", " fetcher or NpmReleasesFetcher(ecosystem))\n", "\n", "\n", "class RubyGemsSolver(Solver):\n", " def __init__(self, ecosystem, parser=None, fetcher=None):\n", " super(RubyGemsSolver, self).__init__(ecosystem,\n", " parser or RubyGemsDependencyParser(),\n", " fetcher or RubyGemsReleasesFetcher(ecosystem))\n", "\n", "\n", "class NugetSolver(Solver):\n", " # https://docs.microsoft.com/en-us/nuget/release-notes/nuget-2.8#-dependencyversion-switch\n", " def __init__(self, ecosystem, parser=None, fetcher=None):\n", " super(NugetSolver, self).__init__(ecosystem,\n", " parser or NugetDependencyParser(),\n", " fetcher or NugetReleasesFetcher(ecosystem),\n", " highest_dependency_version=False)\n", "\n", "\n", "class MavenManualSolver(Solver):\n", " \"\"\" If you need to resolve all versions or use specific DependencyParser.\n", " Otherwise use MavenSolver (below).\n", " \"\"\"\n", " def __init__(self, ecosystem, parser, fetcher=None):\n", " super().__init__(ecosystem,\n", " parser,\n", " fetcher or MavenReleasesFetcher(ecosystem))\n", "\n", "\n", "class GolangSolver(Solver):\n", " def __init__(self, ecosystem, parser=None, fetcher=None):\n", " super(GolangSolver, self).__init__(ecosystem,\n", " parser or GolangDependencyParser(),\n", " fetcher or GolangReleasesFetcher(ecosystem))\n", "\n", " def solve(self, dependencies):\n", " result = {}\n", " for dependency in self.dependency_parser.parse(dependencies):\n", " if dependency.spec:\n", " result[dependency.name] = dependency.spec\n", " else:\n", " version = self.release_fetcher.fetch_releases(dependency.name)[1][0]\n", " result[dependency.name] = version\n", " return result\n", "\n", "\n", "class MavenSolver(object):\n", " \"\"\"\n", " Doesn't inherit from Solver, because we don't use its solve().\n", " We also don't need a DependencyParser nor a ReleasesFetcher for Maven.\n", " 'mvn versions:resolve-ranges' does all the dirty work for us.\n", " Resolves only to one version, so if you need solve(all_versions=True), use MavenManualSolver\n", " \"\"\"\n", "\n", " @staticmethod\n", " def _generate_pom_xml(to_solve):\n", " \"\"\"\n", " Create pom.xml with dependencies from to_solve and run 'mvn versions:resolve-ranges',\n", " which resolves the version ranges (overwrites the pom.xml).\n", " :param to_solve: {\"groupId:artifactId\": \"version-range\"}\n", " \"\"\"\n", " project = etree.Element('project')\n", " etree.SubElement(project, 'modelVersion').text = '4.0.0'\n", " etree.SubElement(project, 'groupId').text = 'foo.bar.baz'\n", " etree.SubElement(project, 'artifactId').text = 'testing'\n", " etree.SubElement(project, 'version').text = '1.0.0'\n", " dependencies = etree.SubElement(project, 'dependencies')\n", " for name, version_range in to_solve.items():\n", " group_id, artifact_id = name.rstrip(':').split(':')\n", " dependency = etree.SubElement(dependencies, 'dependency')\n", " etree.SubElement(dependency, 'groupId').text = group_id\n", " etree.SubElement(dependency, 'artifactId').text = artifact_id\n", " etree.SubElement(dependency, 'version').text = version_range\n", " with open('pom.xml', 'wb') 
as pom:\n", " pom.write(etree.tostring(project, xml_declaration=True, pretty_print=True))\n", " TimedCommand.get_command_output(['mvn', 'versions:resolve-ranges'], graceful=False)\n", "\n", " @staticmethod\n", " def _dependencies_from_pom_xml():\n", " \"\"\"\n", " Extract dependencies from pom.xml in current directory\n", " :return: {\"groupId:artifactId\": \"version\"}\n", " \"\"\"\n", " solved = {}\n", " with open('pom.xml') as r:\n", " pom_dict = anymarkup.parse(r.read())\n", " dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', [])\n", " if not isinstance(dependencies, list):\n", " dependencies = [dependencies]\n", " for dependency in dependencies:\n", " name = \"{}:{}\".format(dependency['groupId'], dependency['artifactId'])\n", " solved[name] = dependency['version']\n", " return solved\n", "\n", " @staticmethod\n", " def _resolve_versions(to_solve):\n", " \"\"\"\n", " Resolve version ranges in to_solve\n", " :param to_solve: {\"groupId:artifactId\": \"version-range\"}\n", " :return: {\"groupId:artifactId\": \"version\"}\n", " \"\"\"\n", " if not to_solve:\n", " return {}\n", " with tempdir() as tmpdir:\n", " with cwd(tmpdir):\n", " MavenSolver._generate_pom_xml(to_solve)\n", " return MavenSolver._dependencies_from_pom_xml()\n", "\n", " @staticmethod\n", " def is_version_range(ver_spec):\n", " # http://maven.apache.org/enforcer/enforcer-rules/versionRanges.html\n", " return re.search('[,()\\[\\]]', ver_spec) is not None\n", "\n", " def solve(self, dependencies):\n", " already_solved = {}\n", " to_solve = {}\n", " for dependency in dependencies:\n", " name, ver_spec = dependency.split(' ', 1)\n", " if not self.is_version_range(ver_spec):\n", " already_solved[name] = ver_spec\n", " else:\n", " to_solve[name] = ver_spec\n", " result = already_solved.copy()\n", " result.update(self._resolve_versions(to_solve))\n", " return result\n", "\n", "\n", "def get_ecosystem_solver(ecosystem, with_parser=None, with_fetcher=None):\n", " \"\"\"\n", " Get `Solver` instance for particular ecosystem\n", "\n", " :param ecosystem: Ecosystem\n", " :param with_parser: DependencyParser instance\n", " :param with_fetcher: ReleasesFetcher instance\n", " :return: Solver\n", " \"\"\"\n", " if ecosystem.is_backed_by(EcosystemBackend.maven):\n", " if with_parser is None:\n", " return MavenSolver()\n", " else:\n", " return MavenManualSolver(ecosystem, with_parser, with_fetcher)\n", " elif ecosystem.is_backed_by(EcosystemBackend.npm):\n", " return NpmSolver(ecosystem, with_parser, with_fetcher)\n", " elif ecosystem.is_backed_by(EcosystemBackend.pypi):\n", " return PypiSolver(ecosystem, with_parser, with_fetcher)\n", " elif ecosystem.is_backed_by(EcosystemBackend.rubygems):\n", " return RubyGemsSolver(ecosystem, with_parser, with_fetcher)\n", " elif ecosystem.is_backed_by(EcosystemBackend.nuget):\n", " return NugetSolver(ecosystem, with_parser, with_fetcher)\n", " elif ecosystem.is_backed_by(EcosystemBackend.scm):\n", " return GolangSolver(ecosystem, with_parser, with_fetcher)\n", "\n", " raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))\n", "\n", "\n", "def get_ecosystem_parser(ecosystem):\n", " if ecosystem.is_backed_by(EcosystemBackend.maven):\n", " return NoOpDependencyParser()\n", " elif ecosystem.is_backed_by(EcosystemBackend.npm):\n", " return NpmDependencyParser()\n", " elif ecosystem.is_backed_by(EcosystemBackend.pypi):\n", " return PypiDependencyParser()\n", " elif ecosystem.is_backed_by(EcosystemBackend.rubygems):\n", " return RubyGemsDependencyParser()\n", " 
elif ecosystem.is_backed_by(EcosystemBackend.nuget):\n", " return NugetDependencyParser()\n", " elif ecosystem.is_backed_by(EcosystemBackend.scm):\n", " return GolangDependencyParser()\n", "\n", " raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0.011627906976744186, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0.010752688172043012, 0.011111111111111112, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0.01, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0.010101010101010102, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0.012048192771084338, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0.010869565217391304, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
945
0.000729
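The record above bundles ecosystem-specific release fetchers, dependency parsers and solvers around a shared compare_version() helper. As an illustrative aside (not part of the dataset record, and independent of f8a_worker), here is a minimal self-contained sketch of that comparison idea: split the version on dots and dashes, let non-numeric pre-release chunks sort below plain numbers, and zero-pad the shorter version before comparing.

def compare_version(a, b):
    # Turn '2.0.0-rc1' into [2, 0, 0, -1]; annotated chunks sort below releases.
    def parts(v):
        out = []
        for chunk in v.replace('-', '.').split('.'):
            try:
                out.append(int(chunk))
            except ValueError:
                out.append(-1)  # 'rc1', 'beta', ... rank below the plain version
        return out

    left, right = parts(a), parts(b)
    width = max(len(left), len(right))
    left += [0] * (width - len(left))    # zero-pad so '1.2' compares like '1.2.0'
    right += [0] * (width - len(right))
    return (left > right) - (left < right)


assert compare_version('1.2', '1.2.0') == 0
assert compare_version('1.10.0', '1.9.9') == 1
assert compare_version('2.0.0-rc1', '2.0.0') == -1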
import functools import logging from aws_utils import NoSuchActivityError from aws_utils.utils import poller import boto3 logging.basicConfig(format='%(levelname)s:\t%(message)s', level=logging.INFO) logger = logging.getLogger(__name__) EXIT_STATUS = 'ERROR' class HealthStatusError(Exception): pass def get_boto3_emr_pipeline_settings(pipeline_name, region): """ Connects to AWS and gets a list of a EMR clusters on the region Args: region (str): AWS region Returns: list of clusters of a EMR """ boto3client_pipelines = boto3.client('datapipeline', region) # 'us-east-1') pipelines = boto3client_pipelines.list_pipelines() for pipeline in pipelines['pipelineIdList']: if pipeline['name'] == pipeline_name: return boto3client_pipelines.describe_pipelines(pipelineIds=[pipeline['id']]) def get_pipeline_state(pipeline_settings): """ Connects to AWS and gets a status of a pipeline :param pipeline_name: (str) name of the Pipeline :param region: (str) AWS region :return: A dictionary containing both status and state of the pipeline : e.g {'status': 7, 'health': 2} Possible return STATUSES : ACTIVATING, CANCELED, CASCADE_FAILED, DEACTIVATING, FAILED, FINISHED, INACTIVE, PAUSED, PENDING, RUNNING, SHUTTING_DOWN, SKIPPED, TIMEDOUT, VALIDATING, WAITING_FOR_RUNNER, WAITING_ON_DEPENDENCIES Possible return STATES : HEALTHY, ERROR """ pipeline_fields = pipeline_settings[ 'pipelineDescriptionList'][0]['fields'] current_pipeline_state = ''.join([item['stringValue'] for item in pipeline_fields if item['key'] == '@pipelineState']) current_health_status = ''.join([item['stringValue'] for item in pipeline_fields if item['key'] == '@healthStatus']) if current_pipeline_state is not '': return {'pipelineState': current_pipeline_state, 'healthStatus': current_health_status} else: raise NoSuchActivityError('The Pipeline state is empty') def poll_pipeline_for_state(pipeline_name, region, terminating_state, interval, max_retry): logger.info('Terminating state: %s', terminating_state) def compare_state_from_list_pipelines(pipeline_settings): status = get_pipeline_state(pipeline_settings) logger.info('Cluster [%s] state: %s', pipeline_name, status) return str(terminating_state['pipelineState']) == str(status['pipelineState']) \ or str(status['healthStatus']) == "ERROR" func_callable = functools.partial(get_boto3_emr_pipeline_settings, pipeline_name, region) return poller(func_callable, compare_state_from_list_pipelines, interval, max_retry)
[ "import functools\n", "import logging\n", "\n", "from aws_utils import NoSuchActivityError\n", "from aws_utils.utils import poller\n", "import boto3\n", "\n", "logging.basicConfig(format='%(levelname)s:\\t%(message)s', level=logging.INFO)\n", "logger = logging.getLogger(__name__)\n", "\n", "EXIT_STATUS = 'ERROR'\n", "\n", "\n", "class HealthStatusError(Exception):\n", " pass\n", "\n", "\n", "def get_boto3_emr_pipeline_settings(pipeline_name, region):\n", " \"\"\"\n", " Connects to AWS and gets a list of a EMR clusters on the region\n", "\n", " Args:\n", " region (str): AWS region\n", " Returns:\n", " list of clusters of a EMR\n", " \"\"\"\n", " boto3client_pipelines = boto3.client('datapipeline', region) # 'us-east-1')\n", " pipelines = boto3client_pipelines.list_pipelines()\n", "\n", " for pipeline in pipelines['pipelineIdList']:\n", " if pipeline['name'] == pipeline_name:\n", " return boto3client_pipelines.describe_pipelines(pipelineIds=[pipeline['id']])\n", "\n", "\n", "def get_pipeline_state(pipeline_settings):\n", " \"\"\"\n", " Connects to AWS and gets a status of a pipeline\n", " :param pipeline_name: (str) name of the Pipeline\n", " :param region: (str) AWS region\n", " :return: A dictionary containing both status and state of the pipeline : e.g {'status': 7, 'health': 2}\n", " Possible return STATUSES : ACTIVATING, CANCELED, CASCADE_FAILED, DEACTIVATING, FAILED, FINISHED, INACTIVE, PAUSED,\n", " PENDING, RUNNING, SHUTTING_DOWN, SKIPPED, TIMEDOUT, VALIDATING, WAITING_FOR_RUNNER, WAITING_ON_DEPENDENCIES\n", " Possible return STATES : HEALTHY, ERROR\n", " \"\"\"\n", " pipeline_fields = pipeline_settings[\n", " 'pipelineDescriptionList'][0]['fields']\n", " current_pipeline_state = ''.join([item['stringValue'] for item in pipeline_fields if item['key'] ==\n", " '@pipelineState'])\n", " current_health_status = ''.join([item['stringValue'] for item in pipeline_fields if item['key'] == '@healthStatus'])\n", "\n", " if current_pipeline_state is not '':\n", " return {'pipelineState': current_pipeline_state, 'healthStatus': current_health_status}\n", " else:\n", " raise NoSuchActivityError('The Pipeline state is empty')\n", "\n", "\n", "def poll_pipeline_for_state(pipeline_name, region, terminating_state, interval, max_retry):\n", " logger.info('Terminating state: %s', terminating_state)\n", "\n", " def compare_state_from_list_pipelines(pipeline_settings):\n", " status = get_pipeline_state(pipeline_settings)\n", " logger.info('Cluster [%s] state: %s', pipeline_name, status)\n", " return str(terminating_state['pipelineState']) == str(status['pipelineState']) \\\n", " or str(status['healthStatus']) == \"ERROR\"\n", "\n", " func_callable = functools.partial(get_boto3_emr_pipeline_settings, pipeline_name, region)\n", " return poller(func_callable, compare_state_from_list_pipelines, interval, max_retry)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0.009259259259259259, 0.008403361344537815, 0.0072992700729927005, 0, 0, 0, 0, 0.009615384615384616, 0, 0.008264462809917356, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0.011235955056179775, 0.017543859649122806, 0, 0.010638297872340425, 0.011235955056179775 ]
67
0.002063
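The record above polls an AWS Data Pipeline until it reaches a terminating state or reports an ERROR health status, delegating the retry loop to aws_utils.utils.poller, which is not included in the dump. A minimal sketch of that poll-until-predicate pattern, assuming a signature of the form poller(fetch, predicate, interval, max_retry), might look like the following; all names below are hypothetical stand-ins, not the real aws_utils API.

import time


def poll(fetch, is_done, interval, max_retry):
    # Call fetch() up to max_retry times, sleeping `interval` seconds between
    # attempts; stop early once is_done(result) is satisfied.
    for _ in range(max_retry):
        if is_done(fetch()):
            return True
        time.sleep(interval)
    return False


# Toy usage: a fake describe-pipelines call that finishes on the third attempt.
state = {'calls': 0}


def fake_fetch():
    state['calls'] += 1
    return {'pipelineState': 'FINISHED' if state['calls'] >= 3 else 'RUNNING',
            'healthStatus': 'HEALTHY'}


def done(status):
    return status['pipelineState'] == 'FINISHED' or status['healthStatus'] == 'ERROR'


print(poll(fake_fetch, done, interval=0, max_retry=5))  # True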
''' Based on the Simple Volume Rendering script from yt-project.org. http://yt-project.org/doc/cookbook/simple_plots.html#simple-volume-rendering Usage: python simple_volume_rendering.py PLOTFILE ''' from yt.mods import * import sys # Load the dataset. plotfile = sys.argv[1] pf = load(plotfile) # Create a data container (like a sphere or region) that # represents the entire domain. dd = pf.h.all_data() # Get the minimum and maximum densities. mi, ma = dd.quantities["Extrema"]("Density")[0] # Parameters numlevels = 5 # Number of Gaussians in the transfer function widths = 0.02 # Width of the Gaussians cmap = "spectral" # Color map center = [0.5, 0.5, 0.5]/pf["unitary"] # Center of the volume rendering look = [0.5, 0.2, 0.7]/pf["unitary"] # Look direction for the camera width = 1.0/pf["unitary"] # Width of the image. Use this to zoom in/out resolution = 512 # Number of pixels in each dimension of the final image # Create a transfer function to map field values to colors. # We bump up our minimum to cut out some of the background fluid tf = ColorTransferFunction((np.log10(mi)+1, np.log10(ma))) # Add several Gausians to the transfer function, evenly spaced # between the min and max specified above with widths w tf.add_layers(numlevels, w=widths, colormap=cmap) # Create the camera object cam = pf.h.camera(center, look, width, resolution, tf) # Create a snapshot. # The return value of this function could also be accepted, modified (or saved # for later manipulation) and then put written out using write_bitmap. # clip_ratio applies a maximum to the function, which is set to that value # times the .std() of the array. cam.snapshot("%s_volume_rendered.png" % pf, clip_ratio=8.0)
[ "'''\n", "Based on the Simple Volume Rendering script from yt-project.org.\n", "http://yt-project.org/doc/cookbook/simple_plots.html#simple-volume-rendering\n", "\n", "Usage:\n", " python simple_volume_rendering.py PLOTFILE\n", "'''\n", "from yt.mods import *\n", "import sys\n", "\n", "# Load the dataset.\n", "plotfile = sys.argv[1]\n", "pf = load(plotfile)\n", "\n", "# Create a data container (like a sphere or region) that\n", "# represents the entire domain.\n", "dd = pf.h.all_data()\n", "\n", "# Get the minimum and maximum densities.\n", "mi, ma = dd.quantities[\"Extrema\"](\"Density\")[0]\n", "\n", "# Parameters\n", "numlevels = 5 # Number of Gaussians in the transfer function\n", "widths = 0.02 # Width of the Gaussians\n", "cmap = \"spectral\" # Color map\n", "center = [0.5, 0.5, 0.5]/pf[\"unitary\"] # Center of the volume rendering\n", "look = [0.5, 0.2, 0.7]/pf[\"unitary\"] # Look direction for the camera\n", "width = 1.0/pf[\"unitary\"] # Width of the image. Use this to zoom in/out\n", "resolution = 512 # Number of pixels in each dimension of the final image\n", "\n", "# Create a transfer function to map field values to colors.\n", "# We bump up our minimum to cut out some of the background fluid\n", "tf = ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))\n", "\n", "# Add several Gausians to the transfer function, evenly spaced\n", "# between the min and max specified above with widths w\n", "tf.add_layers(numlevels, w=widths, colormap=cmap)\n", "\n", "# Create the camera object\n", "cam = pf.h.camera(center, look, width, resolution, tf)\n", "\n", "# Create a snapshot.\n", "# The return value of this function could also be accepted, modified (or saved\n", "# for later manipulation) and then put written out using write_bitmap.\n", "# clip_ratio applies a maximum to the function, which is set to that value\n", "# times the .std() of the array.\n", "cam.snapshot(\"%s_volume_rendered.png\" % pf, clip_ratio=8.0)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0.011627906976744186, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
47
0.000714
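The record above builds a ColorTransferFunction and calls tf.add_layers(numlevels, w=widths, colormap=cmap) to spread Gaussian bumps between the log of the minimum and maximum density. As a rough, yt-independent sketch (the function below is hypothetical and not the yt API), evenly spacing those Gaussians can be expressed with NumPy like this:

import numpy as np


def gaussian_layers(log_mi, log_ma, numlevels, width, samples=256):
    # Evenly space `numlevels` Gaussian bumps between the log-scaled field
    # bounds; a colormap would then assign each bump a colour and opacity.
    centers = np.linspace(log_mi, log_ma, numlevels)
    x = np.linspace(log_mi, log_ma, samples)
    return [(c, np.exp(-((x - c) ** 2) / (2.0 * width ** 2))) for c in centers]


layers = gaussian_layers(-28.0, -24.0, numlevels=5, width=0.02)
print(len(layers), layers[0][1].shape)  # 5 (256,)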
# """ # This is the interface that allows for creating nested lists. # You should not implement it, or speculate about its implementation # """ #class NestedInteger(object): # def isInteger(self): # """ # @return True if this NestedInteger holds a single integer, rather than a nested list. # :rtype bool # """ # # def getInteger(self): # """ # @return the single integer that this NestedInteger holds, if it holds a single integer # Return None if this NestedInteger holds a nested list # :rtype int # """ # # def getList(self): # """ # @return the nested list that this NestedInteger holds, if it holds a nested list # Return None if this NestedInteger holds a single integer # :rtype List[NestedInteger] # """ class NestedIterator(object): def __init__(self, nestedList): """ Initialize your data structure here. :type nestedList: List[NestedInteger] """ self.stack=[] self.hashmaping={} self.maxdepth=0 # reverse the order, when we pop we get the first for i in xrange(len(nestedList)-1,-1,-1): self.stack.append(nestedList[i]) unweight=weighted=0 while len(nestedList): nextlevel=[] for li in nestedList: if li.isInteger(): unweight+=li.getInteger() else: nextlevel.extend(li.getList()) weighted+=unweight nestedList=nextlevel #print weighted def next(self): """ :rtype: int """ return self.stack.pop().getInteger() def hasNext(self): """ :rtype: bool """ while self.stack: top=self.stack[-1] # if it's interge, directly return true if top.isInteger(): return True # if it's a list, put all the element in list to the stack # first get it out self.stack.pop() # then add them singlely for i in xrange(len(top.getList())-1,-1,-1): self.stack.append(top.getList()[i]) return False # Your NestedIterator object will be instantiated and called as such: # i, v = NestedIterator(nestedList), [] # while i.hasNext(): v.append(i.next())
[ "# \"\"\"\n", "# This is the interface that allows for creating nested lists.\n", "# You should not implement it, or speculate about its implementation\n", "# \"\"\"\n", "#class NestedInteger(object):\n", "# def isInteger(self):\n", "# \"\"\"\n", "# @return True if this NestedInteger holds a single integer, rather than a nested list.\n", "# :rtype bool\n", "# \"\"\"\n", "#\n", "# def getInteger(self):\n", "# \"\"\"\n", "# @return the single integer that this NestedInteger holds, if it holds a single integer\n", "# Return None if this NestedInteger holds a nested list\n", "# :rtype int\n", "# \"\"\"\n", "#\n", "# def getList(self):\n", "# \"\"\"\n", "# @return the nested list that this NestedInteger holds, if it holds a nested list\n", "# Return None if this NestedInteger holds a single integer\n", "# :rtype List[NestedInteger]\n", "# \"\"\"\n", "\n", "class NestedIterator(object):\n", "\n", " def __init__(self, nestedList):\n", " \"\"\"\n", " Initialize your data structure here.\n", " :type nestedList: List[NestedInteger]\n", " \"\"\"\n", " self.stack=[]\n", " self.hashmaping={}\n", " self.maxdepth=0\n", " # reverse the order, when we pop we get the first\n", " for i in xrange(len(nestedList)-1,-1,-1):\n", " self.stack.append(nestedList[i])\n", " \n", " unweight=weighted=0\n", " while len(nestedList):\n", " nextlevel=[]\n", " for li in nestedList:\n", " if li.isInteger():\n", " unweight+=li.getInteger()\n", " else:\n", " nextlevel.extend(li.getList())\n", " weighted+=unweight\n", " nestedList=nextlevel\n", " #print weighted\n", " \n", " def next(self):\n", " \"\"\"\n", " :rtype: int\n", " \"\"\"\n", " return self.stack.pop().getInteger()\n", "\n", " def hasNext(self):\n", " \"\"\"\n", " :rtype: bool\n", " \"\"\"\n", " while self.stack:\n", " top=self.stack[-1]\n", " # if it's interge, directly return true\n", " if top.isInteger():\n", " return True\n", " # if it's a list, put all the element in list to the stack\n", " # first get it out\n", " self.stack.pop()\n", " # then add them singlely\n", " for i in xrange(len(top.getList())-1,-1,-1):\n", " self.stack.append(top.getList()[i])\n", " \n", " return False\n", " \n", " \n", "\n", "# Your NestedIterator object will be instantiated and called as such:\n", "# i, v = NestedIterator(nestedList), []\n", "# while i.hasNext(): v.append(i.next())" ]
[ 0, 0, 0, 0, 0.03333333333333333, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0.037037037037037035, 0.041666666666666664, 0, 0.04, 0, 0.1111111111111111, 0.07142857142857142, 0, 0.04, 0, 0, 0.021739130434782608, 0, 0, 0.03225806451612903, 0.030303030303030304, 0.041666666666666664, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0, 0, 0, 0, 0, 0.03508771929824561, 0, 0.1111111111111111, 0, 0.07692307692307693, 0.1111111111111111, 0, 0.014285714285714285, 0, 0.02564102564102564 ]
80
0.013695
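The record above flattens a LeetCode NestedInteger structure lazily with a stack: nested lists are only expanded when hasNext() is called. The same idea over plain nested Python lists, as a self-contained sketch that drops the NestedInteger interface, looks like this:

class FlattenIterator:
    def __init__(self, nested):
        # Push items in reverse so the leftmost element sits on top of the stack.
        self.stack = list(reversed(nested))

    def has_next(self):
        while self.stack:
            top = self.stack[-1]
            if not isinstance(top, list):
                return True
            # Expand a nested list in place, keeping left-to-right order.
            self.stack.pop()
            self.stack.extend(reversed(top))
        return False

    def next(self):
        return self.stack.pop()


it = FlattenIterator([[1, 1], 2, [1, [3]]])
out = []
while it.has_next():
    out.append(it.next())
print(out)  # [1, 1, 2, 1, 3]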
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-10-18 23:20 from __future__ import unicode_literals from django.conf import settings import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('password', models.CharField(max_length=128, null=True, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, null=True, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('id', models.BigAutoField(primary_key=True, serialize=False)), ('producer', models.CharField(max_length=250)), ('p_iva', models.CharField(max_length=11)), ('c_fiscale', models.CharField(max_length=16)), ('cell', models.CharField(max_length=10, null=True)), ('fax', models.CharField(max_length=10, null=True)), ('reference', models.CharField(max_length=50, null=True)), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'Utenti', 'verbose_name_plural': 'Utenti', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='Cer', fields=[ ('id', models.BigAutoField(primary_key=True, serialize=False)), ('codice', models.CharField(max_length=10)), ('descrizione', models.CharField(max_length=800)), ], options={ 'verbose_name': 'CER', 'verbose_name_plural': 'CER', }, ), migrations.CreateModel( name='Module', fields=[ ('id', models.BigAutoField(primary_key=True, serialize=False)), ('date', models.DateField(auto_now_add=True)), ('pdf', models.TextField(db_column='pdf')), ('typeModele', models.CharField(max_length=20)), ], ), migrations.CreateModel( name='ModulePreventivo', fields=[ ('module_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='service.Module')), ], options={ 'verbose_name': 'module_preventivo', 'verbose_name_plural': 'Moduli preventivo', }, bases=('service.module',), ), migrations.CreateModel( name='ModuleRitiro', fields=[ ('module_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='service.Module')), ], options={ 'verbose_name': 'module_ritiro', 'verbose_name_plural': 'Moduli ritiro', }, bases=('service.module',), ), migrations.AddField( model_name='module', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterUniqueTogether( name='user', unique_together=set([('producer', 'email')]), ), ]
[ "# -*- coding: utf-8 -*-\n", "# Generated by Django 1.11.3 on 2017-10-18 23:20\n", "from __future__ import unicode_literals\n", "\n", "from django.conf import settings\n", "import django.contrib.auth.models\n", "import django.contrib.auth.validators\n", "from django.db import migrations, models\n", "import django.db.models.deletion\n", "import django.utils.timezone\n", "\n", "\n", "class Migration(migrations.Migration):\n", "\n", " initial = True\n", "\n", " dependencies = [\n", " ('auth', '0008_alter_user_username_max_length'),\n", " ]\n", "\n", " operations = [\n", " migrations.CreateModel(\n", " name='User',\n", " fields=[\n", " ('password', models.CharField(max_length=128, null=True, verbose_name='password')),\n", " ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n", " ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n", " ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, null=True, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username')),\n", " ('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='first name')),\n", " ('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='last name')),\n", " ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),\n", " ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n", " ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),\n", " ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n", " ('id', models.BigAutoField(primary_key=True, serialize=False)),\n", " ('producer', models.CharField(max_length=250)),\n", " ('p_iva', models.CharField(max_length=11)),\n", " ('c_fiscale', models.CharField(max_length=16)),\n", " ('cell', models.CharField(max_length=10, null=True)),\n", " ('fax', models.CharField(max_length=10, null=True)),\n", " ('reference', models.CharField(max_length=50, null=True)),\n", " ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n", " ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n", " ],\n", " options={\n", " 'verbose_name': 'Utenti',\n", " 'verbose_name_plural': 'Utenti',\n", " },\n", " managers=[\n", " ('objects', django.contrib.auth.models.UserManager()),\n", " ],\n", " ),\n", " migrations.CreateModel(\n", " name='Cer',\n", " fields=[\n", " ('id', models.BigAutoField(primary_key=True, serialize=False)),\n", " ('codice', models.CharField(max_length=10)),\n", " ('descrizione', models.CharField(max_length=800)),\n", " ],\n", " options={\n", " 'verbose_name': 'CER',\n", " 'verbose_name_plural': 'CER',\n", " },\n", " ),\n", " migrations.CreateModel(\n", " name='Module',\n", " fields=[\n", " ('id', models.BigAutoField(primary_key=True, serialize=False)),\n", " ('date', models.DateField(auto_now_add=True)),\n", " ('pdf', models.TextField(db_column='pdf')),\n", " ('typeModele', models.CharField(max_length=20)),\n", " ],\n", " ),\n", " migrations.CreateModel(\n", " name='ModulePreventivo',\n", " fields=[\n", " ('module_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='service.Module')),\n", " ],\n", " options={\n", " 'verbose_name': 'module_preventivo',\n", " 'verbose_name_plural': 'Moduli preventivo',\n", " },\n", " bases=('service.module',),\n", " ),\n", " migrations.CreateModel(\n", " name='ModuleRitiro',\n", " fields=[\n", " ('module_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='service.Module')),\n", " ],\n", " options={\n", " 'verbose_name': 'module_ritiro',\n", " 'verbose_name_plural': 'Moduli ritiro',\n", " },\n", " bases=('service.module',),\n", " ),\n", " migrations.AddField(\n", " model_name='module',\n", " name='user',\n", " field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n", " ),\n", " migrations.AlterUniqueTogether(\n", " name='user',\n", " unique_together=set([('producer', 'email')]),\n", " ),\n", " ]\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0.009615384615384616, 0.005076142131979695, 0.0029498525073746312, 0.008695652173913044, 0.008849557522123894, 0.009615384615384616, 0.006024096385542169, 0.004901960784313725, 0.00847457627118644, 0, 0, 0, 0, 0, 0, 0, 0.003745318352059925, 0.004347826086956522, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.005208333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.005208333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0 ]
105
0.000969
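The record above is an auto-generated Django 1.11 migration that creates a custom User model with Italian invoicing fields (producer, p_iva, c_fiscale) and a (producer, email) uniqueness constraint. The source models are not part of the dump; a definition of roughly the following shape is one plausible way to produce that AlterUniqueTogether operation (this is an assumption, not the project's actual code).

from django.contrib.auth.models import AbstractUser
from django.db import models


class User(AbstractUser):
    # Extra fields matching the columns declared in the migration above.
    id = models.BigAutoField(primary_key=True)
    producer = models.CharField(max_length=250)
    p_iva = models.CharField(max_length=11)
    c_fiscale = models.CharField(max_length=16)
    cell = models.CharField(max_length=10, null=True)
    fax = models.CharField(max_length=10, null=True)
    reference = models.CharField(max_length=50, null=True)

    class Meta:
        verbose_name = 'Utenti'
        unique_together = (('producer', 'email'),)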
# Implement atoi to convert a string to an integer. # Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below and ask yourself what are the possible input cases. # Notes: It is intended for this problem to be specified vaguely (ie, no given input specs). You are responsible to gather all the input requirements up front. # Update (2015-02-10): # The signature of the C++ function had been updated. If you still see your function signature accepts a const char * argument, please click the reload button to reset your code definition. # spoilers alert... click to show requirements for atoi. # Requirements for atoi: # The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. # Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value. # The string can contain additional characters after those that form the integral number, # which are ignored and have no effect on the behavior of this function. # If the first sequence of non-whitespace characters in str is not a valid integral number, # or if no such sequence exists because either str is empty or it contains only whitespace characters, # no conversion is performed. # If no valid conversion could be performed, a zero value is returned. # If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned. class Solution: # @param {string} str # @return {integer} def myAtoi(self, s): s = s.strip() if not s: return 0 value, negative = 0, s[0] == '-' digits = set([str(i) for i in xrange(10)]) if s[0] in ('+', '-'): s = s[1:] for c in s: if c in digits: value = 10*value + int(c) else: break value = -value if negative else value if value > 2147483647: return 2147483647 elif value < -2147483648: return -2147483648 return value
[ "# Implement atoi to convert a string to an integer.\n", "\n", "# Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below and ask yourself what are the possible input cases.\n", "\n", "# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs). You are responsible to gather all the input requirements up front.\n", "\n", "# Update (2015-02-10):\n", "# The signature of the C++ function had been updated. If you still see your function signature accepts a const char * argument, please click the reload button to reset your code definition.\n", "\n", "# spoilers alert... click to show requirements for atoi.\n", "# Requirements for atoi:\n", "\n", "# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. \n", "# Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.\n", "\n", "# The string can contain additional characters after those that form the integral number, \n", "# which are ignored and have no effect on the behavior of this function.\n", "\n", "# If the first sequence of non-whitespace characters in str is not a valid integral number, \n", "# or if no such sequence exists because either str is empty or it contains only whitespace characters, \n", "# no conversion is performed.\n", "\n", "# If no valid conversion could be performed, a zero value is returned. \n", "# If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.\n", "\n", "\n", "class Solution:\n", " # @param {string} str\n", " # @return {integer}\n", " def myAtoi(self, s):\n", " s = s.strip()\n", " if not s:\n", " return 0\n", "\n", " value, negative = 0, s[0] == '-'\n", " digits = set([str(i) for i in xrange(10)])\n", "\n", " if s[0] in ('+', '-'):\n", " s = s[1:]\n", "\n", " for c in s:\n", " if c in digits:\n", " value = 10*value + int(c)\n", " else:\n", " break\n", "\n", " value = -value if negative else value\n", "\n", " if value > 2147483647:\n", " return 2147483647\n", " elif value < -2147483648:\n", " return -2147483648\n", "\n", " return value\n" ]
[ 0, 0, 0.006493506493506494, 0, 0.00625, 0, 0, 0.005263157894736842, 0, 0, 0, 0, 0.016, 0.005714285714285714, 0, 0.02197802197802198, 0, 0, 0.021505376344086023, 0.019230769230769232, 0, 0, 0.013888888888888888, 0.007874015748031496, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
54
0.0023
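The atoi solution in the row above targets Python 2 (it relies on xrange). As a point of comparison, here is a minimal Python 3 sketch of the same approach described in the requirements: strip whitespace, read an optional sign, accumulate digits, stop at the first non-digit, and clamp to the 32-bit signed range. The function name my_atoi and the test strings are illustrative, not part of the original row.

# Python 3 sketch of the clamp-on-overflow atoi described above.
INT_MAX = 2147483647
INT_MIN = -2147483648


def my_atoi(s: str) -> int:
    s = s.strip()
    if not s:
        return 0

    sign = 1
    if s[0] in '+-':
        if s[0] == '-':
            sign = -1
        s = s[1:]

    value = 0
    for c in s:
        if not c.isdigit():
            break  # trailing non-digit characters are ignored
        value = value * 10 + int(c)

    value *= sign
    # clamp out-of-range results to INT_MAX / INT_MIN
    return max(INT_MIN, min(INT_MAX, value))


assert my_atoi("   -42abc") == -42
assert my_atoi("91283472332") == INT_MAX
assert my_atoi("") == 0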
# coding: utf8 from ...symbols import ( ADJ, DET, NOUN, NUM, PRON, PROPN, PUNCT, VERB, POS ) from ...lemmatizer import Lemmatizer class RussianLemmatizer(Lemmatizer): _morph = None def __init__(self): super(RussianLemmatizer, self).__init__() try: from pymorphy2 import MorphAnalyzer except ImportError: raise ImportError( 'The Russian lemmatizer requires the pymorphy2 library: ' 'try to fix it with "pip install pymorphy2==0.8"') if RussianLemmatizer._morph is None: RussianLemmatizer._morph = MorphAnalyzer() def __call__(self, string, univ_pos, morphology=None): univ_pos = self.normalize_univ_pos(univ_pos) if univ_pos == 'PUNCT': return [PUNCT_RULES.get(string, string)] if univ_pos not in ('ADJ', 'DET', 'NOUN', 'NUM', 'PRON', 'PROPN', 'VERB'): # Skip unchangeable pos return [string.lower()] analyses = self._morph.parse(string) filtered_analyses = [] for analysis in analyses: if not analysis.is_known: # Skip suggested parse variant for unknown word for pymorphy continue analysis_pos, _ = oc2ud(str(analysis.tag)) if analysis_pos == univ_pos \ or (analysis_pos in ('NOUN', 'PROPN') and univ_pos in ('NOUN', 'PROPN')): filtered_analyses.append(analysis) if not len(filtered_analyses): return [string.lower()] if morphology is None or (len(morphology) == 1 and POS in morphology): return list(set([analysis.normal_form for analysis in filtered_analyses])) if univ_pos in ('ADJ', 'DET', 'NOUN', 'PROPN'): features_to_compare = ['Case', 'Number', 'Gender'] elif univ_pos == 'NUM': features_to_compare = ['Case', 'Gender'] elif univ_pos == 'PRON': features_to_compare = ['Case', 'Number', 'Gender', 'Person'] else: # VERB features_to_compare = ['Aspect', 'Gender', 'Mood', 'Number', 'Tense', 'VerbForm', 'Voice'] analyses, filtered_analyses = filtered_analyses, [] for analysis in analyses: _, analysis_morph = oc2ud(str(analysis.tag)) for feature in features_to_compare: if (feature in morphology and feature in analysis_morph and morphology[feature] != analysis_morph[feature]): break else: filtered_analyses.append(analysis) if not len(filtered_analyses): return [string.lower()] return list(set([analysis.normal_form for analysis in filtered_analyses])) @staticmethod def normalize_univ_pos(univ_pos): if isinstance(univ_pos, str): return univ_pos.upper() symbols_to_str = { ADJ: 'ADJ', DET: 'DET', NOUN: 'NOUN', NUM: 'NUM', PRON: 'PRON', PROPN: 'PROPN', PUNCT: 'PUNCT', VERB: 'VERB' } if univ_pos in symbols_to_str: return symbols_to_str[univ_pos] return None def is_base_form(self, univ_pos, morphology=None): # TODO raise NotImplementedError def det(self, string, morphology=None): return self(string, 'det', morphology) def num(self, string, morphology=None): return self(string, 'num', morphology) def pron(self, string, morphology=None): return self(string, 'pron', morphology) def lookup(self, string): analyses = self._morph.parse(string) if len(analyses) == 1: return analyses[0].normal_form return string def oc2ud(oc_tag): gram_map = { '_POS': { 'ADJF': 'ADJ', 'ADJS': 'ADJ', 'ADVB': 'ADV', 'Apro': 'DET', 'COMP': 'ADJ', # Can also be an ADV - unchangeable 'CONJ': 'CCONJ', # Can also be a SCONJ - both unchangeable ones 'GRND': 'VERB', 'INFN': 'VERB', 'INTJ': 'INTJ', 'NOUN': 'NOUN', 'NPRO': 'PRON', 'NUMR': 'NUM', 'NUMB': 'NUM', 'PNCT': 'PUNCT', 'PRCL': 'PART', 'PREP': 'ADP', 'PRTF': 'VERB', 'PRTS': 'VERB', 'VERB': 'VERB', }, 'Animacy': { 'anim': 'Anim', 'inan': 'Inan', }, 'Aspect': { 'impf': 'Imp', 'perf': 'Perf', }, 'Case': { 'ablt': 'Ins', 'accs': 'Acc', 'datv': 'Dat', 'gen1': 'Gen', 'gen2': 'Gen', 'gent': 'Gen', 'loc2': 
'Loc', 'loct': 'Loc', 'nomn': 'Nom', 'voct': 'Voc', }, 'Degree': { 'COMP': 'Cmp', 'Supr': 'Sup', }, 'Gender': { 'femn': 'Fem', 'masc': 'Masc', 'neut': 'Neut', }, 'Mood': { 'impr': 'Imp', 'indc': 'Ind', }, 'Number': { 'plur': 'Plur', 'sing': 'Sing', }, 'NumForm': { 'NUMB': 'Digit', }, 'Person': { '1per': '1', '2per': '2', '3per': '3', 'excl': '2', 'incl': '1', }, 'Tense': { 'futr': 'Fut', 'past': 'Past', 'pres': 'Pres', }, 'Variant': { 'ADJS': 'Brev', 'PRTS': 'Brev', }, 'VerbForm': { 'GRND': 'Conv', 'INFN': 'Inf', 'PRTF': 'Part', 'PRTS': 'Part', 'VERB': 'Fin', }, 'Voice': { 'actv': 'Act', 'pssv': 'Pass', }, 'Abbr': { 'Abbr': 'Yes' } } pos = 'X' morphology = dict() unmatched = set() grams = oc_tag.replace(' ', ',').split(',') for gram in grams: match = False for categ, gmap in sorted(gram_map.items()): if gram in gmap: match = True if categ == '_POS': pos = gmap[gram] else: morphology[categ] = gmap[gram] if not match: unmatched.add(gram) while len(unmatched) > 0: gram = unmatched.pop() if gram in ('Name', 'Patr', 'Surn', 'Geox', 'Orgn'): pos = 'PROPN' elif gram == 'Auxt': pos = 'AUX' elif gram == 'Pltm': morphology['Number'] = 'Ptan' return pos, morphology PUNCT_RULES = { "«": "\"", "»": "\"" }
[ "# coding: utf8\n", "from ...symbols import (\n", " ADJ, DET, NOUN, NUM, PRON, PROPN, PUNCT, VERB, POS\n", ")\n", "from ...lemmatizer import Lemmatizer\n", "\n", "\n", "class RussianLemmatizer(Lemmatizer):\n", " _morph = None\n", "\n", " def __init__(self):\n", " super(RussianLemmatizer, self).__init__()\n", " try:\n", " from pymorphy2 import MorphAnalyzer\n", " except ImportError:\n", " raise ImportError(\n", " 'The Russian lemmatizer requires the pymorphy2 library: '\n", " 'try to fix it with \"pip install pymorphy2==0.8\"')\n", "\n", " if RussianLemmatizer._morph is None:\n", " RussianLemmatizer._morph = MorphAnalyzer()\n", "\n", " def __call__(self, string, univ_pos, morphology=None):\n", " univ_pos = self.normalize_univ_pos(univ_pos)\n", " if univ_pos == 'PUNCT':\n", " return [PUNCT_RULES.get(string, string)]\n", "\n", " if univ_pos not in ('ADJ', 'DET', 'NOUN', 'NUM', 'PRON', 'PROPN', 'VERB'):\n", " # Skip unchangeable pos\n", " return [string.lower()]\n", "\n", " analyses = self._morph.parse(string)\n", " filtered_analyses = []\n", " for analysis in analyses:\n", " if not analysis.is_known:\n", " # Skip suggested parse variant for unknown word for pymorphy\n", " continue\n", " analysis_pos, _ = oc2ud(str(analysis.tag))\n", " if analysis_pos == univ_pos \\\n", " or (analysis_pos in ('NOUN', 'PROPN') and univ_pos in ('NOUN', 'PROPN')):\n", " filtered_analyses.append(analysis)\n", "\n", " if not len(filtered_analyses):\n", " return [string.lower()]\n", " if morphology is None or (len(morphology) == 1 and POS in morphology):\n", " return list(set([analysis.normal_form for analysis in filtered_analyses]))\n", "\n", " if univ_pos in ('ADJ', 'DET', 'NOUN', 'PROPN'):\n", " features_to_compare = ['Case', 'Number', 'Gender']\n", " elif univ_pos == 'NUM':\n", " features_to_compare = ['Case', 'Gender']\n", " elif univ_pos == 'PRON':\n", " features_to_compare = ['Case', 'Number', 'Gender', 'Person']\n", " else: # VERB\n", " features_to_compare = ['Aspect', 'Gender', 'Mood', 'Number', 'Tense', 'VerbForm', 'Voice']\n", "\n", " analyses, filtered_analyses = filtered_analyses, []\n", " for analysis in analyses:\n", " _, analysis_morph = oc2ud(str(analysis.tag))\n", " for feature in features_to_compare:\n", " if (feature in morphology and feature in analysis_morph\n", " and morphology[feature] != analysis_morph[feature]):\n", " break\n", " else:\n", " filtered_analyses.append(analysis)\n", "\n", " if not len(filtered_analyses):\n", " return [string.lower()]\n", " return list(set([analysis.normal_form for analysis in filtered_analyses]))\n", "\n", " @staticmethod\n", " def normalize_univ_pos(univ_pos):\n", " if isinstance(univ_pos, str):\n", " return univ_pos.upper()\n", "\n", " symbols_to_str = {\n", " ADJ: 'ADJ',\n", " DET: 'DET',\n", " NOUN: 'NOUN',\n", " NUM: 'NUM',\n", " PRON: 'PRON',\n", " PROPN: 'PROPN',\n", " PUNCT: 'PUNCT',\n", " VERB: 'VERB'\n", " }\n", " if univ_pos in symbols_to_str:\n", " return symbols_to_str[univ_pos]\n", " return None\n", "\n", " def is_base_form(self, univ_pos, morphology=None):\n", " # TODO\n", " raise NotImplementedError\n", "\n", " def det(self, string, morphology=None):\n", " return self(string, 'det', morphology)\n", "\n", " def num(self, string, morphology=None):\n", " return self(string, 'num', morphology)\n", "\n", " def pron(self, string, morphology=None):\n", " return self(string, 'pron', morphology)\n", "\n", " def lookup(self, string):\n", " analyses = self._morph.parse(string)\n", " if len(analyses) == 1:\n", " return analyses[0].normal_form\n", " 
return string\n", "\n", "\n", "def oc2ud(oc_tag):\n", " gram_map = {\n", " '_POS': {\n", " 'ADJF': 'ADJ',\n", " 'ADJS': 'ADJ',\n", " 'ADVB': 'ADV',\n", " 'Apro': 'DET',\n", " 'COMP': 'ADJ', # Can also be an ADV - unchangeable\n", " 'CONJ': 'CCONJ', # Can also be a SCONJ - both unchangeable ones\n", " 'GRND': 'VERB',\n", " 'INFN': 'VERB',\n", " 'INTJ': 'INTJ',\n", " 'NOUN': 'NOUN',\n", " 'NPRO': 'PRON',\n", " 'NUMR': 'NUM',\n", " 'NUMB': 'NUM',\n", " 'PNCT': 'PUNCT',\n", " 'PRCL': 'PART',\n", " 'PREP': 'ADP',\n", " 'PRTF': 'VERB',\n", " 'PRTS': 'VERB',\n", " 'VERB': 'VERB',\n", " },\n", " 'Animacy': {\n", " 'anim': 'Anim',\n", " 'inan': 'Inan',\n", " },\n", " 'Aspect': {\n", " 'impf': 'Imp',\n", " 'perf': 'Perf',\n", " },\n", " 'Case': {\n", " 'ablt': 'Ins',\n", " 'accs': 'Acc',\n", " 'datv': 'Dat',\n", " 'gen1': 'Gen',\n", " 'gen2': 'Gen',\n", " 'gent': 'Gen',\n", " 'loc2': 'Loc',\n", " 'loct': 'Loc',\n", " 'nomn': 'Nom',\n", " 'voct': 'Voc',\n", " },\n", " 'Degree': {\n", " 'COMP': 'Cmp',\n", " 'Supr': 'Sup',\n", " },\n", " 'Gender': {\n", " 'femn': 'Fem',\n", " 'masc': 'Masc',\n", " 'neut': 'Neut',\n", " },\n", " 'Mood': {\n", " 'impr': 'Imp',\n", " 'indc': 'Ind',\n", " },\n", " 'Number': {\n", " 'plur': 'Plur',\n", " 'sing': 'Sing',\n", " },\n", " 'NumForm': {\n", " 'NUMB': 'Digit',\n", " },\n", " 'Person': {\n", " '1per': '1',\n", " '2per': '2',\n", " '3per': '3',\n", " 'excl': '2',\n", " 'incl': '1',\n", " },\n", " 'Tense': {\n", " 'futr': 'Fut',\n", " 'past': 'Past',\n", " 'pres': 'Pres',\n", " },\n", " 'Variant': {\n", " 'ADJS': 'Brev',\n", " 'PRTS': 'Brev',\n", " },\n", " 'VerbForm': {\n", " 'GRND': 'Conv',\n", " 'INFN': 'Inf',\n", " 'PRTF': 'Part',\n", " 'PRTS': 'Part',\n", " 'VERB': 'Fin',\n", " },\n", " 'Voice': {\n", " 'actv': 'Act',\n", " 'pssv': 'Pass',\n", " },\n", " 'Abbr': {\n", " 'Abbr': 'Yes'\n", " }\n", " }\n", "\n", " pos = 'X'\n", " morphology = dict()\n", " unmatched = set()\n", "\n", " grams = oc_tag.replace(' ', ',').split(',')\n", " for gram in grams:\n", " match = False\n", " for categ, gmap in sorted(gram_map.items()):\n", " if gram in gmap:\n", " match = True\n", " if categ == '_POS':\n", " pos = gmap[gram]\n", " else:\n", " morphology[categ] = gmap[gram]\n", " if not match:\n", " unmatched.add(gram)\n", "\n", " while len(unmatched) > 0:\n", " gram = unmatched.pop()\n", " if gram in ('Name', 'Patr', 'Surn', 'Geox', 'Orgn'):\n", " pos = 'PROPN'\n", " elif gram == 'Auxt':\n", " pos = 'AUX'\n", " elif gram == 'Pltm':\n", " morphology['Number'] = 'Ptan'\n", "\n", " return pos, morphology\n", "\n", "\n", "PUNCT_RULES = {\n", " \"«\": \"\\\"\",\n", " \"»\": \"\\\"\"\n", "}\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
237
0.000236
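The RussianLemmatizer row above delegates morphological analysis to pymorphy2 and converts its OpenCorpora tags to Universal Dependencies categories via oc2ud. The following is a minimal sketch of the pymorphy2 calls it wraps, assuming pymorphy2 is installed (the row's error message suggests "pip install pymorphy2==0.8"); the example word and the tag quoted in the comment are illustrative.

import pymorphy2

morph = pymorphy2.MorphAnalyzer()

for parse in morph.parse('стали'):
    # parse.tag holds an OpenCorpora tag such as 'VERB,perf,intr plur,past,indc';
    # oc2ud() in the row maps it to a (UD POS, morphology dict) pair, and
    # parse.normal_form is the candidate lemma the lemmatizer returns.
    print(parse.normal_form, str(parse.tag), parse.is_known)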
""" Record Arrays ============= Record arrays expose the fields of structured arrays as properties. Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, bools etc. However, it is possible for elements to be combinations of these using structured types, such as:: >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)]) >>> a array([(1, 2.0), (1, 2.0)], dtype=[('x', '<i4'), ('y', '<f8')]) Here, each element consists of two fields: x (and int), and y (a float). This is known as a structured array. The different fields are analogous to columns in a spread-sheet. The different fields can be accessed as one would a dictionary:: >>> a['x'] array([1, 1]) >>> a['y'] array([ 2., 2.]) Record arrays allow us to access fields as properties:: >>> ar = np.rec.array(a) >>> ar.x array([1, 1]) >>> ar.y array([ 2., 2.]) """ from __future__ import division, absolute_import, print_function import sys import os import warnings from . import numeric as sb from . import numerictypes as nt from numpy.compat import isfileobj, bytes, long, unicode, os_fspath from numpy.core.overrides import set_module from .arrayprint import get_printoptions # All of the functions allow formats to be a dtype __all__ = ['record', 'recarray', 'format_parser'] ndarray = sb.ndarray _byteorderconv = {'b':'>', 'l':'<', 'n':'=', 'B':'>', 'L':'<', 'N':'=', 'S':'s', 's':'s', '>':'>', '<':'<', '=':'=', '|':'|', 'I':'|', 'i':'|'} # formats regular expression # allows multidimension spec with a tuple syntax in front # of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' # are equally allowed numfmt = nt.typeDict def find_duplicate(list): """Find duplication in a list, return a list of duplicated elements""" dup = [] for i in range(len(list)): if (list[i] in list[i + 1:]): if (list[i] not in dup): dup.append(list[i]) return dup @set_module('numpy') class format_parser(object): """ Class to convert formats, names, titles description to a dtype. After constructing the format_parser object, the dtype attribute is the converted data-type: ``dtype = format_parser(formats, names, titles).dtype`` Attributes ---------- dtype : dtype The converted data-type. Parameters ---------- formats : str or list of str The format description, either specified as a string with comma-separated format descriptions in the form ``'f8, i4, a5'``, or a list of format description strings in the form ``['f8', 'i4', 'a5']``. names : str or list/tuple of str The field names, either specified as a comma-separated string in the form ``'col1, col2, col3'``, or as a list or tuple of strings in the form ``['col1', 'col2', 'col3']``. An empty list can be used, in that case default field names ('f0', 'f1', ...) are used. titles : sequence Sequence of title strings. An empty list can be used to leave titles out. aligned : bool, optional If True, align the fields by padding as the C-compiler would. Default is False. byteorder : str, optional If specified, all the fields will be changed to the provided byte-order. Otherwise, the default byte-order is used. For all available string specifiers, see `dtype.newbyteorder`. See Also -------- dtype, typename, sctype2char Examples -------- >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], ... ['T1', 'T2', 'T3']).dtype dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), '|S5')]) `names` and/or `titles` can be empty lists. If `titles` is an empty list, titles will simply not appear. If `names` is empty, default field names will be used. 
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], ... []).dtype dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')]) >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')]) """ def __init__(self, formats, names, titles, aligned=False, byteorder=None): self._parseFormats(formats, aligned) self._setfieldnames(names, titles) self._createdescr(byteorder) self.dtype = self._descr def _parseFormats(self, formats, aligned=0): """ Parse the field formats """ if formats is None: raise ValueError("Need formats argument") if isinstance(formats, list): if len(formats) < 2: formats.append('') formats = ','.join(formats) dtype = sb.dtype(formats, aligned) fields = dtype.fields if fields is None: dtype = sb.dtype([('f1', dtype)], aligned) fields = dtype.fields keys = dtype.names self._f_formats = [fields[key][0] for key in keys] self._offsets = [fields[key][1] for key in keys] self._nfields = len(keys) def _setfieldnames(self, names, titles): """convert input field names into a list and assign to the _names attribute """ if (names): if (type(names) in [list, tuple]): pass elif isinstance(names, (str, unicode)): names = names.split(',') else: raise NameError("illegal input names %s" % repr(names)) self._names = [n.strip() for n in names[:self._nfields]] else: self._names = [] # if the names are not specified, they will be assigned as # "f0, f1, f2,..." # if not enough names are specified, they will be assigned as "f[n], # f[n+1],..." etc. where n is the number of specified names..." self._names += ['f%d' % i for i in range(len(self._names), self._nfields)] # check for redundant names _dup = find_duplicate(self._names) if _dup: raise ValueError("Duplicate field names: %s" % _dup) if (titles): self._titles = [n.strip() for n in titles[:self._nfields]] else: self._titles = [] titles = [] if (self._nfields > len(titles)): self._titles += [None] * (self._nfields - len(titles)) def _createdescr(self, byteorder): descr = sb.dtype({'names':self._names, 'formats':self._f_formats, 'offsets':self._offsets, 'titles':self._titles}) if (byteorder is not None): byteorder = _byteorderconv[byteorder[0]] descr = descr.newbyteorder(byteorder) self._descr = descr class record(nt.void): """A data-type scalar that allows field access as attribute lookup. 
""" # manually set name and module so that this class's type shows up # as numpy.record when printed __name__ = 'record' __module__ = 'numpy' def __repr__(self): if get_printoptions()['legacy'] == '1.13': return self.__str__() return super(record, self).__repr__() def __str__(self): if get_printoptions()['legacy'] == '1.13': return str(self.item()) return super(record, self).__str__() def __getattribute__(self, attr): if attr in ['setfield', 'getfield', 'dtype']: return nt.void.__getattribute__(self, attr) try: return nt.void.__getattribute__(self, attr) except AttributeError: pass fielddict = nt.void.__getattribute__(self, 'dtype').fields res = fielddict.get(attr, None) if res: obj = self.getfield(*res[:2]) # if it has fields return a record, # otherwise return the object try: dt = obj.dtype except AttributeError: #happens if field is Object type return obj if dt.fields: return obj.view((self.__class__, obj.dtype.fields)) return obj else: raise AttributeError("'record' object has no " "attribute '%s'" % attr) def __setattr__(self, attr, val): if attr in ['setfield', 'getfield', 'dtype']: raise AttributeError("Cannot set '%s' attribute" % attr) fielddict = nt.void.__getattribute__(self, 'dtype').fields res = fielddict.get(attr, None) if res: return self.setfield(val, *res[:2]) else: if getattr(self, attr, None): return nt.void.__setattr__(self, attr, val) else: raise AttributeError("'record' object has no " "attribute '%s'" % attr) def __getitem__(self, indx): obj = nt.void.__getitem__(self, indx) # copy behavior of record.__getattribute__, if isinstance(obj, nt.void) and obj.dtype.fields: return obj.view((self.__class__, obj.dtype.fields)) else: # return a single element return obj def pprint(self): """Pretty-print all fields.""" # pretty-print all fields names = self.dtype.names maxlen = max(len(name) for name in names) fmt = '%% %ds: %%s' % maxlen rows = [fmt % (name, getattr(self, name)) for name in names] return "\n".join(rows) # The recarray is almost identical to a standard array (which supports # named fields already) The biggest difference is that it can use # attribute-lookup to find the fields and it is constructed using # a record. # If byteorder is given it forces a particular byteorder on all # the fields (and any subfields) class recarray(ndarray): """Construct an ndarray that allows field access using attributes. Arrays may have a data-types containing fields, analogous to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, where each entry in the array is a pair of ``(int, float)``. Normally, these attributes are accessed using dictionary lookups such as ``arr['x']`` and ``arr['y']``. Record arrays allow the fields to be accessed as members of the array, using ``arr.x`` and ``arr.y``. Parameters ---------- shape : tuple Shape of output array. dtype : data-type, optional The desired data-type. By default, the data-type is determined from `formats`, `names`, `titles`, `aligned` and `byteorder`. formats : list of data-types, optional A list containing the data-types for the different columns, e.g. ``['i4', 'f8', 'i4']``. `formats` does *not* support the new convention of using types directly, i.e. ``(int, float, int)``. Note that `formats` must be a list, not a tuple. Given that `formats` is somewhat limited, we recommend specifying `dtype` instead. names : tuple of str, optional The name of each column, e.g. ``('x', 'y', 'z')``. buf : buffer, optional By default, a new array is created of the given shape and data-type. 
If `buf` is specified and is an object exposing the buffer interface, the array will use the memory from the existing buffer. In this case, the `offset` and `strides` keywords are available. Other Parameters ---------------- titles : tuple of str, optional Aliases for column names. For example, if `names` were ``('x', 'y', 'z')`` and `titles` is ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. byteorder : {'<', '>', '='}, optional Byte-order for all fields. aligned : bool, optional Align the fields in memory as the C-compiler would. strides : tuple of ints, optional Buffer (`buf`) is interpreted according to these strides (strides define how many bytes each array element, row, column, etc. occupy in memory). offset : int, optional Start reading buffer (`buf`) from this offset onwards. order : {'C', 'F'}, optional Row-major (C-style) or column-major (Fortran-style) order. Returns ------- rec : recarray Empty array of the given shape and type. See Also -------- rec.fromrecords : Construct a record array from data. record : fundamental data-type for `recarray`. format_parser : determine a data-type from formats, names, titles. Notes ----- This constructor can be compared to ``empty``: it creates a new record array but does not fill it with data. To create a record array from data, use one of the following methods: 1. Create a standard ndarray and convert it to a record array, using ``arr.view(np.recarray)`` 2. Use the `buf` keyword. 3. Use `np.rec.fromrecords`. Examples -------- Create an array with two fields, ``x`` and ``y``: >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) >>> x array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i4')]) >>> x['x'] array([ 1., 3.]) View the array as a record array: >>> x = x.view(np.recarray) >>> x.x array([ 1., 3.]) >>> x.y array([2, 4]) Create a new, empty record array: >>> np.recarray((2,), ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), (3471280, 1.2134086255804012e-316, 0)], dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')]) """ # manually set name and module so that this class's type shows # up as "numpy.recarray" when printed __name__ = 'recarray' __module__ = 'numpy' def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C'): if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder)._descr if buf is None: self = ndarray.__new__(subtype, shape, (record, descr), order=order) else: self = ndarray.__new__(subtype, shape, (record, descr), buffer=buf, offset=offset, strides=strides, order=order) return self def __array_finalize__(self, obj): if self.dtype.type is not record and self.dtype.fields: # if self.dtype is not np.record, invoke __setattr__ which will # convert it to a record if it is a void dtype. self.dtype = self.dtype def __getattribute__(self, attr): # See if ndarray has this attr, and return it if so. (note that this # means a field with the same name as an ndarray attr cannot be # accessed by attribute). 
try: return object.__getattribute__(self, attr) except AttributeError: # attr must be a fieldname pass # look for a field with this name fielddict = ndarray.__getattribute__(self, 'dtype').fields try: res = fielddict[attr][:2] except (TypeError, KeyError): raise AttributeError("recarray has no attribute %s" % attr) obj = self.getfield(*res) # At this point obj will always be a recarray, since (see # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is # non-structured, convert it to an ndarray. Then if obj is structured # with void type convert it to the same dtype.type (eg to preserve # numpy.record type if present), since nested structured fields do not # inherit type. Don't do this for non-void structures though. if obj.dtype.fields: if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj else: return obj.view(ndarray) # Save the dictionary. # If the attr is a field name and not in the saved dictionary # Undo any "setting" of the attribute and do a setfield # Thus, you can't create attributes on-the-fly that are field names. def __setattr__(self, attr, val): # Automatically convert (void) structured types to records # (but not non-void structures, subarrays, or non-structured voids) if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields: val = sb.dtype((record, val)) newattr = attr not in self.__dict__ try: ret = object.__setattr__(self, attr, val) except Exception: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: exctype, value = sys.exc_info()[:2] raise exctype(value) else: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: return ret if newattr: # We just added this one or this setattr worked on an # internal attribute. try: object.__delattr__(self, attr) except Exception: return ret try: res = fielddict[attr][:2] except (TypeError, KeyError): raise AttributeError("record array has no attribute %s" % attr) return self.setfield(val, *res) def __getitem__(self, indx): obj = super(recarray, self).__getitem__(indx) # copy behavior of getattr, except that here # we might also be returning a single element if isinstance(obj, ndarray): if obj.dtype.fields: obj = obj.view(type(self)) if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj else: return obj.view(type=ndarray) else: # return a single element return obj def __repr__(self): repr_dtype = self.dtype if (self.dtype.type is record or (not issubclass(self.dtype.type, nt.void))): # If this is a full record array (has numpy.record dtype), # or if it has a scalar (non-void) dtype with no records, # represent it using the rec.array function. Since rec.array # converts dtype to a numpy.record for us, convert back # to non-record before printing if repr_dtype.type is record: repr_dtype = sb.dtype((nt.void, repr_dtype)) prefix = "rec.array(" fmt = 'rec.array(%s,%sdtype=%s)' else: # otherwise represent it using np.array plus a view # This should only happen if the user is playing # strange games with dtypes. prefix = "array(" fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)' # get data/shape string. 
logic taken from numeric.array_repr if self.size > 0 or self.shape == (0,): lst = sb.array2string( self, separator=', ', prefix=prefix, suffix=',') else: # show zero-length shape unless it is (0,) lst = "[], shape=%s" % (repr(self.shape),) lf = '\n'+' '*len(prefix) if get_printoptions()['legacy'] == '1.13': lf = ' ' + lf # trailing space return fmt % (lst, lf, repr_dtype) def field(self, attr, val=None): if isinstance(attr, int): names = ndarray.__getattribute__(self, 'dtype').names attr = names[attr] fielddict = ndarray.__getattribute__(self, 'dtype').fields res = fielddict[attr][:2] if val is None: obj = self.getfield(*res) if obj.dtype.fields: return obj return obj.view(ndarray) else: return self.setfield(val, *res) def fromarrays(arrayList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None): """ create a record array from a (flat) list of arrays >>> x1=np.array([1,2,3,4]) >>> x2=np.array(['a','dd','xyz','12']) >>> x3=np.array([1.1,2,3,4]) >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') >>> print(r[1]) (2, 'dd', 2.0) >>> x1[1]=34 >>> r.a array([1, 2, 3, 4]) """ arrayList = [sb.asarray(x) for x in arrayList] if shape is None or shape == 0: shape = arrayList[0].shape if isinstance(shape, int): shape = (shape,) if formats is None and dtype is None: # go through each object in the list to see if it is an ndarray # and determine the formats. formats = [] for obj in arrayList: if not isinstance(obj, ndarray): raise ValueError("item in the array list must be an ndarray.") formats.append(obj.dtype.str) formats = ','.join(formats) if dtype is not None: descr = sb.dtype(dtype) _names = descr.names else: parsed = format_parser(formats, names, titles, aligned, byteorder) _names = parsed._names descr = parsed._descr # Determine shape from data-type. if len(descr) != len(arrayList): raise ValueError("mismatch between the number of fields " "and the number of arrays") d0 = descr[0].shape nn = len(d0) if nn > 0: shape = shape[:-nn] for k, obj in enumerate(arrayList): nn = descr[k].ndim testshape = obj.shape[:obj.ndim - nn] if testshape != shape: raise ValueError("array-shape mismatch in array %d" % k) _array = recarray(shape, descr) # populate the record array (makes a copy) for i in range(len(arrayList)): _array[_names[i]] = arrayList[i] return _array def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None): """ create a recarray from a list of records in text form The data in the same field can be heterogeneous, they will be promoted to the highest data type. This method is intended for creating smaller record arrays. If used to create large array without formats defined r=fromrecords([(2,3.,'abc')]*100000) it can be slow. If formats is None, then this will auto-detect formats. Use list of tuples rather than list of lists for faster processing. >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], ... 
names='col1,col2,col3') >>> print(r[0]) (456, 'dbe', 1.2) >>> r.col1 array([456, 2]) >>> r.col2 array(['dbe', 'de'], dtype='|S3') >>> import pickle >>> print(pickle.loads(pickle.dumps(r))) [(456, 'dbe', 1.2) (2, 'de', 1.3)] """ if formats is None and dtype is None: # slower obj = sb.array(recList, dtype=object) arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])] return fromarrays(arrlist, formats=formats, shape=shape, names=names, titles=titles, aligned=aligned, byteorder=byteorder) if dtype is not None: descr = sb.dtype((record, dtype)) else: descr = format_parser(formats, names, titles, aligned, byteorder)._descr try: retval = sb.array(recList, dtype=descr) except (TypeError, ValueError): if (shape is None or shape == 0): shape = len(recList) if isinstance(shape, (int, long)): shape = (shape,) if len(shape) > 1: raise ValueError("Can only deal with 1-d array.") _array = recarray(shape, descr) for k in range(_array.size): _array[k] = tuple(recList[k]) # list of lists instead of list of tuples ? # 2018-02-07, 1.14.1 warnings.warn( "fromrecords expected a list of tuples, may have received a list " "of lists instead. In the future that will raise an error", FutureWarning, stacklevel=2) return _array else: if shape is not None and retval.shape != shape: retval.shape = shape res = retval.view(recarray) return res def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """ create a (read-only) record array from binary data contained in a string""" if dtype is None and formats is None: raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder)._descr itemsize = descr.itemsize if (shape is None or shape == 0 or shape == -1): shape = (len(datastring) - offset) // itemsize _array = recarray(shape, descr, buf=datastring, offset=offset) return _array def get_remaining_size(fd): try: fn = fd.fileno() except AttributeError: return os.path.getsize(fd.name) - fd.tell() st = os.fstat(fn) size = st.st_size - fd.tell() return size def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, names=None, titles=None, aligned=False, byteorder=None): """Create an array from binary file data If file is a string or a path-like object then that file is opened, else it is assumed to be a file object. The file object must support random access (i.e. it must have tell and seek methods). >>> from tempfile import TemporaryFile >>> a = np.empty(10,dtype='f8,i4,a5') >>> a[5] = (0.5,10,'abcde') >>> >>> fd=TemporaryFile() >>> a = a.newbyteorder('<') >>> a.tofile(fd) >>> >>> fd.seek(0) >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, ... 
byteorder='<') >>> print(r[5]) (0.5, 10, 'abcde') >>> r.shape (10,) """ if dtype is None and formats is None: raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") if (shape is None or shape == 0): shape = (-1,) elif isinstance(shape, (int, long)): shape = (shape,) if isfileobj(fd): # file already opened name = 0 else: # open file fd = open(os_fspath(fd), 'rb') name = 1 if (offset > 0): fd.seek(offset, 1) size = get_remaining_size(fd) if dtype is not None: descr = sb.dtype(dtype) else: descr = format_parser(formats, names, titles, aligned, byteorder)._descr itemsize = descr.itemsize shapeprod = sb.array(shape).prod(dtype=nt.intp) shapesize = shapeprod * itemsize if shapesize < 0: shape = list(shape) shape[shape.index(-1)] = size // -shapesize shape = tuple(shape) shapeprod = sb.array(shape).prod(dtype=nt.intp) nbytes = shapeprod * itemsize if nbytes > size: raise ValueError( "Not enough bytes left in file for specified shape and type") # create the array _array = recarray(shape, descr) nbytesread = fd.readinto(_array.data) if nbytesread != nbytes: raise IOError("Didn't read as many bytes as expected") if name: fd.close() return _array def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, copy=True): """Construct a record array from a wide-variety of objects. """ if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and (formats is None) and (dtype is None)): raise ValueError("Must define formats (or dtype) if object is " "None, string, or an open file") kwds = {} if dtype is not None: dtype = sb.dtype(dtype) elif formats is not None: dtype = format_parser(formats, names, titles, aligned, byteorder)._descr else: kwds = {'formats': formats, 'names': names, 'titles': titles, 'aligned': aligned, 'byteorder': byteorder } if obj is None: if shape is None: raise ValueError("Must define a shape if obj is None") return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) elif isinstance(obj, bytes): return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) elif isinstance(obj, (list, tuple)): if isinstance(obj[0], (tuple, list)): return fromrecords(obj, dtype=dtype, shape=shape, **kwds) else: return fromarrays(obj, dtype=dtype, shape=shape, **kwds) elif isinstance(obj, recarray): if dtype is not None and (obj.dtype != dtype): new = obj.view(dtype) else: new = obj if copy: new = new.copy() return new elif isfileobj(obj): return fromfile(obj, dtype=dtype, shape=shape, offset=offset) elif isinstance(obj, ndarray): if dtype is not None and (obj.dtype != dtype): new = obj.view(dtype) else: new = obj if copy: new = new.copy() return new.view(recarray) else: interface = getattr(obj, "__array_interface__", None) if interface is None or not isinstance(interface, dict): raise ValueError("Unknown input type") obj = sb.array(obj) if dtype is not None and (obj.dtype != dtype): obj = obj.view(dtype) return obj.view(recarray)
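The module above implements numpy's record/recarray machinery together with the fromarrays, fromrecords, fromstring, and fromfile constructors. A minimal usage sketch with illustrative data, showing the attribute-style field access the recarray class provides:

import numpy as np

x = np.array([1, 2, 3])
y = np.array([1.5, 2.5, 3.5])

# build a record array from plain arrays; fields become attributes
r = np.core.records.fromarrays([x, y], names='x,y')
print(r.x)      # same values as x
print(r[0])     # a single record, e.g. (1, 1.5)
print(r.dtype)  # a numpy.record dtype with fields 'x' and 'y'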
[ "\"\"\"\n", "Record Arrays\n", "=============\n", "Record arrays expose the fields of structured arrays as properties.\n", "\n", "Most commonly, ndarrays contain elements of a single type, e.g. floats,\n", "integers, bools etc. However, it is possible for elements to be combinations\n", "of these using structured types, such as::\n", "\n", " >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])\n", " >>> a\n", " array([(1, 2.0), (1, 2.0)],\n", " dtype=[('x', '<i4'), ('y', '<f8')])\n", "\n", "Here, each element consists of two fields: x (and int), and y (a float).\n", "This is known as a structured array. The different fields are analogous\n", "to columns in a spread-sheet. The different fields can be accessed as\n", "one would a dictionary::\n", "\n", " >>> a['x']\n", " array([1, 1])\n", "\n", " >>> a['y']\n", " array([ 2., 2.])\n", "\n", "Record arrays allow us to access fields as properties::\n", "\n", " >>> ar = np.rec.array(a)\n", "\n", " >>> ar.x\n", " array([1, 1])\n", "\n", " >>> ar.y\n", " array([ 2., 2.])\n", "\n", "\"\"\"\n", "from __future__ import division, absolute_import, print_function\n", "\n", "import sys\n", "import os\n", "import warnings\n", "\n", "from . import numeric as sb\n", "from . import numerictypes as nt\n", "from numpy.compat import isfileobj, bytes, long, unicode, os_fspath\n", "from numpy.core.overrides import set_module\n", "from .arrayprint import get_printoptions\n", "\n", "# All of the functions allow formats to be a dtype\n", "__all__ = ['record', 'recarray', 'format_parser']\n", "\n", "\n", "ndarray = sb.ndarray\n", "\n", "_byteorderconv = {'b':'>',\n", " 'l':'<',\n", " 'n':'=',\n", " 'B':'>',\n", " 'L':'<',\n", " 'N':'=',\n", " 'S':'s',\n", " 's':'s',\n", " '>':'>',\n", " '<':'<',\n", " '=':'=',\n", " '|':'|',\n", " 'I':'|',\n", " 'i':'|'}\n", "\n", "# formats regular expression\n", "# allows multidimension spec with a tuple syntax in front\n", "# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '\n", "# are equally allowed\n", "\n", "numfmt = nt.typeDict\n", "\n", "def find_duplicate(list):\n", " \"\"\"Find duplication in a list, return a list of duplicated elements\"\"\"\n", " dup = []\n", " for i in range(len(list)):\n", " if (list[i] in list[i + 1:]):\n", " if (list[i] not in dup):\n", " dup.append(list[i])\n", " return dup\n", "\n", "\n", "@set_module('numpy')\n", "class format_parser(object):\n", " \"\"\"\n", " Class to convert formats, names, titles description to a dtype.\n", "\n", " After constructing the format_parser object, the dtype attribute is\n", " the converted data-type:\n", " ``dtype = format_parser(formats, names, titles).dtype``\n", "\n", " Attributes\n", " ----------\n", " dtype : dtype\n", " The converted data-type.\n", "\n", " Parameters\n", " ----------\n", " formats : str or list of str\n", " The format description, either specified as a string with\n", " comma-separated format descriptions in the form ``'f8, i4, a5'``, or\n", " a list of format description strings in the form\n", " ``['f8', 'i4', 'a5']``.\n", " names : str or list/tuple of str\n", " The field names, either specified as a comma-separated string in the\n", " form ``'col1, col2, col3'``, or as a list or tuple of strings in the\n", " form ``['col1', 'col2', 'col3']``.\n", " An empty list can be used, in that case default field names\n", " ('f0', 'f1', ...) are used.\n", " titles : sequence\n", " Sequence of title strings. 
An empty list can be used to leave titles\n", " out.\n", " aligned : bool, optional\n", " If True, align the fields by padding as the C-compiler would.\n", " Default is False.\n", " byteorder : str, optional\n", " If specified, all the fields will be changed to the\n", " provided byte-order. Otherwise, the default byte-order is\n", " used. For all available string specifiers, see `dtype.newbyteorder`.\n", "\n", " See Also\n", " --------\n", " dtype, typename, sctype2char\n", "\n", " Examples\n", " --------\n", " >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],\n", " ... ['T1', 'T2', 'T3']).dtype\n", " dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),\n", " (('T3', 'col3'), '|S5')])\n", "\n", " `names` and/or `titles` can be empty lists. If `titles` is an empty list,\n", " titles will simply not appear. If `names` is empty, default field names\n", " will be used.\n", "\n", " >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],\n", " ... []).dtype\n", " dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])\n", " >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype\n", " dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])\n", "\n", " \"\"\"\n", "\n", " def __init__(self, formats, names, titles, aligned=False, byteorder=None):\n", " self._parseFormats(formats, aligned)\n", " self._setfieldnames(names, titles)\n", " self._createdescr(byteorder)\n", " self.dtype = self._descr\n", "\n", " def _parseFormats(self, formats, aligned=0):\n", " \"\"\" Parse the field formats \"\"\"\n", "\n", " if formats is None:\n", " raise ValueError(\"Need formats argument\")\n", " if isinstance(formats, list):\n", " if len(formats) < 2:\n", " formats.append('')\n", " formats = ','.join(formats)\n", " dtype = sb.dtype(formats, aligned)\n", " fields = dtype.fields\n", " if fields is None:\n", " dtype = sb.dtype([('f1', dtype)], aligned)\n", " fields = dtype.fields\n", " keys = dtype.names\n", " self._f_formats = [fields[key][0] for key in keys]\n", " self._offsets = [fields[key][1] for key in keys]\n", " self._nfields = len(keys)\n", "\n", " def _setfieldnames(self, names, titles):\n", " \"\"\"convert input field names into a list and assign to the _names\n", " attribute \"\"\"\n", "\n", " if (names):\n", " if (type(names) in [list, tuple]):\n", " pass\n", " elif isinstance(names, (str, unicode)):\n", " names = names.split(',')\n", " else:\n", " raise NameError(\"illegal input names %s\" % repr(names))\n", "\n", " self._names = [n.strip() for n in names[:self._nfields]]\n", " else:\n", " self._names = []\n", "\n", " # if the names are not specified, they will be assigned as\n", " # \"f0, f1, f2,...\"\n", " # if not enough names are specified, they will be assigned as \"f[n],\n", " # f[n+1],...\" etc. 
where n is the number of specified names...\"\n", " self._names += ['f%d' % i for i in range(len(self._names),\n", " self._nfields)]\n", " # check for redundant names\n", " _dup = find_duplicate(self._names)\n", " if _dup:\n", " raise ValueError(\"Duplicate field names: %s\" % _dup)\n", "\n", " if (titles):\n", " self._titles = [n.strip() for n in titles[:self._nfields]]\n", " else:\n", " self._titles = []\n", " titles = []\n", "\n", " if (self._nfields > len(titles)):\n", " self._titles += [None] * (self._nfields - len(titles))\n", "\n", " def _createdescr(self, byteorder):\n", " descr = sb.dtype({'names':self._names,\n", " 'formats':self._f_formats,\n", " 'offsets':self._offsets,\n", " 'titles':self._titles})\n", " if (byteorder is not None):\n", " byteorder = _byteorderconv[byteorder[0]]\n", " descr = descr.newbyteorder(byteorder)\n", "\n", " self._descr = descr\n", "\n", "class record(nt.void):\n", " \"\"\"A data-type scalar that allows field access as attribute lookup.\n", " \"\"\"\n", "\n", " # manually set name and module so that this class's type shows up\n", " # as numpy.record when printed\n", " __name__ = 'record'\n", " __module__ = 'numpy'\n", "\n", " def __repr__(self):\n", " if get_printoptions()['legacy'] == '1.13':\n", " return self.__str__()\n", " return super(record, self).__repr__()\n", "\n", " def __str__(self):\n", " if get_printoptions()['legacy'] == '1.13':\n", " return str(self.item())\n", " return super(record, self).__str__()\n", "\n", " def __getattribute__(self, attr):\n", " if attr in ['setfield', 'getfield', 'dtype']:\n", " return nt.void.__getattribute__(self, attr)\n", " try:\n", " return nt.void.__getattribute__(self, attr)\n", " except AttributeError:\n", " pass\n", " fielddict = nt.void.__getattribute__(self, 'dtype').fields\n", " res = fielddict.get(attr, None)\n", " if res:\n", " obj = self.getfield(*res[:2])\n", " # if it has fields return a record,\n", " # otherwise return the object\n", " try:\n", " dt = obj.dtype\n", " except AttributeError:\n", " #happens if field is Object type\n", " return obj\n", " if dt.fields:\n", " return obj.view((self.__class__, obj.dtype.fields))\n", " return obj\n", " else:\n", " raise AttributeError(\"'record' object has no \"\n", " \"attribute '%s'\" % attr)\n", "\n", " def __setattr__(self, attr, val):\n", " if attr in ['setfield', 'getfield', 'dtype']:\n", " raise AttributeError(\"Cannot set '%s' attribute\" % attr)\n", " fielddict = nt.void.__getattribute__(self, 'dtype').fields\n", " res = fielddict.get(attr, None)\n", " if res:\n", " return self.setfield(val, *res[:2])\n", " else:\n", " if getattr(self, attr, None):\n", " return nt.void.__setattr__(self, attr, val)\n", " else:\n", " raise AttributeError(\"'record' object has no \"\n", " \"attribute '%s'\" % attr)\n", "\n", " def __getitem__(self, indx):\n", " obj = nt.void.__getitem__(self, indx)\n", "\n", " # copy behavior of record.__getattribute__,\n", " if isinstance(obj, nt.void) and obj.dtype.fields:\n", " return obj.view((self.__class__, obj.dtype.fields))\n", " else:\n", " # return a single element\n", " return obj\n", "\n", " def pprint(self):\n", " \"\"\"Pretty-print all fields.\"\"\"\n", " # pretty-print all fields\n", " names = self.dtype.names\n", " maxlen = max(len(name) for name in names)\n", " fmt = '%% %ds: %%s' % maxlen\n", " rows = [fmt % (name, getattr(self, name)) for name in names]\n", " return \"\\n\".join(rows)\n", "\n", "# The recarray is almost identical to a standard array (which supports\n", "# named fields already) The biggest 
difference is that it can use\n", "# attribute-lookup to find the fields and it is constructed using\n", "# a record.\n", "\n", "# If byteorder is given it forces a particular byteorder on all\n", "# the fields (and any subfields)\n", "\n", "class recarray(ndarray):\n", " \"\"\"Construct an ndarray that allows field access using attributes.\n", "\n", " Arrays may have a data-types containing fields, analogous\n", " to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,\n", " where each entry in the array is a pair of ``(int, float)``. Normally,\n", " these attributes are accessed using dictionary lookups such as ``arr['x']``\n", " and ``arr['y']``. Record arrays allow the fields to be accessed as members\n", " of the array, using ``arr.x`` and ``arr.y``.\n", "\n", " Parameters\n", " ----------\n", " shape : tuple\n", " Shape of output array.\n", " dtype : data-type, optional\n", " The desired data-type. By default, the data-type is determined\n", " from `formats`, `names`, `titles`, `aligned` and `byteorder`.\n", " formats : list of data-types, optional\n", " A list containing the data-types for the different columns, e.g.\n", " ``['i4', 'f8', 'i4']``. `formats` does *not* support the new\n", " convention of using types directly, i.e. ``(int, float, int)``.\n", " Note that `formats` must be a list, not a tuple.\n", " Given that `formats` is somewhat limited, we recommend specifying\n", " `dtype` instead.\n", " names : tuple of str, optional\n", " The name of each column, e.g. ``('x', 'y', 'z')``.\n", " buf : buffer, optional\n", " By default, a new array is created of the given shape and data-type.\n", " If `buf` is specified and is an object exposing the buffer interface,\n", " the array will use the memory from the existing buffer. In this case,\n", " the `offset` and `strides` keywords are available.\n", "\n", " Other Parameters\n", " ----------------\n", " titles : tuple of str, optional\n", " Aliases for column names. For example, if `names` were\n", " ``('x', 'y', 'z')`` and `titles` is\n", " ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then\n", " ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.\n", " byteorder : {'<', '>', '='}, optional\n", " Byte-order for all fields.\n", " aligned : bool, optional\n", " Align the fields in memory as the C-compiler would.\n", " strides : tuple of ints, optional\n", " Buffer (`buf`) is interpreted according to these strides (strides\n", " define how many bytes each array element, row, column, etc.\n", " occupy in memory).\n", " offset : int, optional\n", " Start reading buffer (`buf`) from this offset onwards.\n", " order : {'C', 'F'}, optional\n", " Row-major (C-style) or column-major (Fortran-style) order.\n", "\n", " Returns\n", " -------\n", " rec : recarray\n", " Empty array of the given shape and type.\n", "\n", " See Also\n", " --------\n", " rec.fromrecords : Construct a record array from data.\n", " record : fundamental data-type for `recarray`.\n", " format_parser : determine a data-type from formats, names, titles.\n", "\n", " Notes\n", " -----\n", " This constructor can be compared to ``empty``: it creates a new record\n", " array but does not fill it with data. To create a record array from data,\n", " use one of the following methods:\n", "\n", " 1. Create a standard ndarray and convert it to a record array,\n", " using ``arr.view(np.recarray)``\n", " 2. Use the `buf` keyword.\n", " 3. 
Use `np.rec.fromrecords`.\n", "\n", " Examples\n", " --------\n", " Create an array with two fields, ``x`` and ``y``:\n", "\n", " >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])\n", " >>> x\n", " array([(1.0, 2), (3.0, 4)],\n", " dtype=[('x', '<f8'), ('y', '<i4')])\n", "\n", " >>> x['x']\n", " array([ 1., 3.])\n", "\n", " View the array as a record array:\n", "\n", " >>> x = x.view(np.recarray)\n", "\n", " >>> x.x\n", " array([ 1., 3.])\n", "\n", " >>> x.y\n", " array([2, 4])\n", "\n", " Create a new, empty record array:\n", "\n", " >>> np.recarray((2,),\n", " ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP\n", " rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),\n", " (3471280, 1.2134086255804012e-316, 0)],\n", " dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])\n", "\n", " \"\"\"\n", "\n", " # manually set name and module so that this class's type shows\n", " # up as \"numpy.recarray\" when printed\n", " __name__ = 'recarray'\n", " __module__ = 'numpy'\n", "\n", " def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,\n", " formats=None, names=None, titles=None,\n", " byteorder=None, aligned=False, order='C'):\n", "\n", " if dtype is not None:\n", " descr = sb.dtype(dtype)\n", " else:\n", " descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n", "\n", " if buf is None:\n", " self = ndarray.__new__(subtype, shape, (record, descr), order=order)\n", " else:\n", " self = ndarray.__new__(subtype, shape, (record, descr),\n", " buffer=buf, offset=offset,\n", " strides=strides, order=order)\n", " return self\n", "\n", " def __array_finalize__(self, obj):\n", " if self.dtype.type is not record and self.dtype.fields:\n", " # if self.dtype is not np.record, invoke __setattr__ which will\n", " # convert it to a record if it is a void dtype.\n", " self.dtype = self.dtype\n", "\n", " def __getattribute__(self, attr):\n", " # See if ndarray has this attr, and return it if so. (note that this\n", " # means a field with the same name as an ndarray attr cannot be\n", " # accessed by attribute).\n", " try:\n", " return object.__getattribute__(self, attr)\n", " except AttributeError: # attr must be a fieldname\n", " pass\n", "\n", " # look for a field with this name\n", " fielddict = ndarray.__getattribute__(self, 'dtype').fields\n", " try:\n", " res = fielddict[attr][:2]\n", " except (TypeError, KeyError):\n", " raise AttributeError(\"recarray has no attribute %s\" % attr)\n", " obj = self.getfield(*res)\n", "\n", " # At this point obj will always be a recarray, since (see\n", " # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is\n", " # non-structured, convert it to an ndarray. Then if obj is structured\n", " # with void type convert it to the same dtype.type (eg to preserve\n", " # numpy.record type if present), since nested structured fields do not\n", " # inherit type. 
Don't do this for non-void structures though.\n", " if obj.dtype.fields:\n", " if issubclass(obj.dtype.type, nt.void):\n", " return obj.view(dtype=(self.dtype.type, obj.dtype))\n", " return obj\n", " else:\n", " return obj.view(ndarray)\n", "\n", " # Save the dictionary.\n", " # If the attr is a field name and not in the saved dictionary\n", " # Undo any \"setting\" of the attribute and do a setfield\n", " # Thus, you can't create attributes on-the-fly that are field names.\n", " def __setattr__(self, attr, val):\n", "\n", " # Automatically convert (void) structured types to records\n", " # (but not non-void structures, subarrays, or non-structured voids)\n", " if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:\n", " val = sb.dtype((record, val))\n", "\n", " newattr = attr not in self.__dict__\n", " try:\n", " ret = object.__setattr__(self, attr, val)\n", " except Exception:\n", " fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n", " if attr not in fielddict:\n", " exctype, value = sys.exc_info()[:2]\n", " raise exctype(value)\n", " else:\n", " fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n", " if attr not in fielddict:\n", " return ret\n", " if newattr:\n", " # We just added this one or this setattr worked on an\n", " # internal attribute.\n", " try:\n", " object.__delattr__(self, attr)\n", " except Exception:\n", " return ret\n", " try:\n", " res = fielddict[attr][:2]\n", " except (TypeError, KeyError):\n", " raise AttributeError(\"record array has no attribute %s\" % attr)\n", " return self.setfield(val, *res)\n", "\n", " def __getitem__(self, indx):\n", " obj = super(recarray, self).__getitem__(indx)\n", "\n", " # copy behavior of getattr, except that here\n", " # we might also be returning a single element\n", " if isinstance(obj, ndarray):\n", " if obj.dtype.fields:\n", " obj = obj.view(type(self))\n", " if issubclass(obj.dtype.type, nt.void):\n", " return obj.view(dtype=(self.dtype.type, obj.dtype))\n", " return obj\n", " else:\n", " return obj.view(type=ndarray)\n", " else:\n", " # return a single element\n", " return obj\n", "\n", " def __repr__(self):\n", "\n", " repr_dtype = self.dtype\n", " if (self.dtype.type is record\n", " or (not issubclass(self.dtype.type, nt.void))):\n", " # If this is a full record array (has numpy.record dtype),\n", " # or if it has a scalar (non-void) dtype with no records,\n", " # represent it using the rec.array function. Since rec.array\n", " # converts dtype to a numpy.record for us, convert back\n", " # to non-record before printing\n", " if repr_dtype.type is record:\n", " repr_dtype = sb.dtype((nt.void, repr_dtype))\n", " prefix = \"rec.array(\"\n", " fmt = 'rec.array(%s,%sdtype=%s)'\n", " else:\n", " # otherwise represent it using np.array plus a view\n", " # This should only happen if the user is playing\n", " # strange games with dtypes.\n", " prefix = \"array(\"\n", " fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'\n", "\n", " # get data/shape string. 
logic taken from numeric.array_repr\n", " if self.size > 0 or self.shape == (0,):\n", " lst = sb.array2string(\n", " self, separator=', ', prefix=prefix, suffix=',')\n", " else:\n", " # show zero-length shape unless it is (0,)\n", " lst = \"[], shape=%s\" % (repr(self.shape),)\n", "\n", " lf = '\\n'+' '*len(prefix)\n", " if get_printoptions()['legacy'] == '1.13':\n", " lf = ' ' + lf # trailing space\n", " return fmt % (lst, lf, repr_dtype)\n", "\n", " def field(self, attr, val=None):\n", " if isinstance(attr, int):\n", " names = ndarray.__getattribute__(self, 'dtype').names\n", " attr = names[attr]\n", "\n", " fielddict = ndarray.__getattribute__(self, 'dtype').fields\n", "\n", " res = fielddict[attr][:2]\n", "\n", " if val is None:\n", " obj = self.getfield(*res)\n", " if obj.dtype.fields:\n", " return obj\n", " return obj.view(ndarray)\n", " else:\n", " return self.setfield(val, *res)\n", "\n", "\n", "def fromarrays(arrayList, dtype=None, shape=None, formats=None,\n", " names=None, titles=None, aligned=False, byteorder=None):\n", " \"\"\" create a record array from a (flat) list of arrays\n", "\n", " >>> x1=np.array([1,2,3,4])\n", " >>> x2=np.array(['a','dd','xyz','12'])\n", " >>> x3=np.array([1.1,2,3,4])\n", " >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')\n", " >>> print(r[1])\n", " (2, 'dd', 2.0)\n", " >>> x1[1]=34\n", " >>> r.a\n", " array([1, 2, 3, 4])\n", " \"\"\"\n", "\n", " arrayList = [sb.asarray(x) for x in arrayList]\n", "\n", " if shape is None or shape == 0:\n", " shape = arrayList[0].shape\n", "\n", " if isinstance(shape, int):\n", " shape = (shape,)\n", "\n", " if formats is None and dtype is None:\n", " # go through each object in the list to see if it is an ndarray\n", " # and determine the formats.\n", " formats = []\n", " for obj in arrayList:\n", " if not isinstance(obj, ndarray):\n", " raise ValueError(\"item in the array list must be an ndarray.\")\n", " formats.append(obj.dtype.str)\n", " formats = ','.join(formats)\n", "\n", " if dtype is not None:\n", " descr = sb.dtype(dtype)\n", " _names = descr.names\n", " else:\n", " parsed = format_parser(formats, names, titles, aligned, byteorder)\n", " _names = parsed._names\n", " descr = parsed._descr\n", "\n", " # Determine shape from data-type.\n", " if len(descr) != len(arrayList):\n", " raise ValueError(\"mismatch between the number of fields \"\n", " \"and the number of arrays\")\n", "\n", " d0 = descr[0].shape\n", " nn = len(d0)\n", " if nn > 0:\n", " shape = shape[:-nn]\n", "\n", " for k, obj in enumerate(arrayList):\n", " nn = descr[k].ndim\n", " testshape = obj.shape[:obj.ndim - nn]\n", " if testshape != shape:\n", " raise ValueError(\"array-shape mismatch in array %d\" % k)\n", "\n", " _array = recarray(shape, descr)\n", "\n", " # populate the record array (makes a copy)\n", " for i in range(len(arrayList)):\n", " _array[_names[i]] = arrayList[i]\n", "\n", " return _array\n", "\n", "def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,\n", " titles=None, aligned=False, byteorder=None):\n", " \"\"\" create a recarray from a list of records in text form\n", "\n", " The data in the same field can be heterogeneous, they will be promoted\n", " to the highest data type. This method is intended for creating\n", " smaller record arrays. If used to create large array without formats\n", " defined\n", "\n", " r=fromrecords([(2,3.,'abc')]*100000)\n", "\n", " it can be slow.\n", "\n", " If formats is None, then this will auto-detect formats. 
Use list of\n", " tuples rather than list of lists for faster processing.\n", "\n", " >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],\n", " ... names='col1,col2,col3')\n", " >>> print(r[0])\n", " (456, 'dbe', 1.2)\n", " >>> r.col1\n", " array([456, 2])\n", " >>> r.col2\n", " array(['dbe', 'de'],\n", " dtype='|S3')\n", " >>> import pickle\n", " >>> print(pickle.loads(pickle.dumps(r)))\n", " [(456, 'dbe', 1.2) (2, 'de', 1.3)]\n", " \"\"\"\n", "\n", " if formats is None and dtype is None: # slower\n", " obj = sb.array(recList, dtype=object)\n", " arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]\n", " return fromarrays(arrlist, formats=formats, shape=shape, names=names,\n", " titles=titles, aligned=aligned, byteorder=byteorder)\n", "\n", " if dtype is not None:\n", " descr = sb.dtype((record, dtype))\n", " else:\n", " descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n", "\n", " try:\n", " retval = sb.array(recList, dtype=descr)\n", " except (TypeError, ValueError):\n", " if (shape is None or shape == 0):\n", " shape = len(recList)\n", " if isinstance(shape, (int, long)):\n", " shape = (shape,)\n", " if len(shape) > 1:\n", " raise ValueError(\"Can only deal with 1-d array.\")\n", " _array = recarray(shape, descr)\n", " for k in range(_array.size):\n", " _array[k] = tuple(recList[k])\n", " # list of lists instead of list of tuples ?\n", " # 2018-02-07, 1.14.1\n", " warnings.warn(\n", " \"fromrecords expected a list of tuples, may have received a list \"\n", " \"of lists instead. In the future that will raise an error\",\n", " FutureWarning, stacklevel=2)\n", " return _array\n", " else:\n", " if shape is not None and retval.shape != shape:\n", " retval.shape = shape\n", "\n", " res = retval.view(recarray)\n", "\n", " return res\n", "\n", "\n", "def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,\n", " names=None, titles=None, aligned=False, byteorder=None):\n", " \"\"\" create a (read-only) record array from binary data contained in\n", " a string\"\"\"\n", "\n", " if dtype is None and formats is None:\n", " raise TypeError(\"fromstring() needs a 'dtype' or 'formats' argument\")\n", "\n", " if dtype is not None:\n", " descr = sb.dtype(dtype)\n", " else:\n", " descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n", "\n", " itemsize = descr.itemsize\n", " if (shape is None or shape == 0 or shape == -1):\n", " shape = (len(datastring) - offset) // itemsize\n", "\n", " _array = recarray(shape, descr, buf=datastring, offset=offset)\n", " return _array\n", "\n", "def get_remaining_size(fd):\n", " try:\n", " fn = fd.fileno()\n", " except AttributeError:\n", " return os.path.getsize(fd.name) - fd.tell()\n", " st = os.fstat(fn)\n", " size = st.st_size - fd.tell()\n", " return size\n", "\n", "def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,\n", " names=None, titles=None, aligned=False, byteorder=None):\n", " \"\"\"Create an array from binary file data\n", "\n", " If file is a string or a path-like object then that file is opened,\n", " else it is assumed to be a file object. The file object must\n", " support random access (i.e. 
it must have tell and seek methods).\n", "\n", " >>> from tempfile import TemporaryFile\n", " >>> a = np.empty(10,dtype='f8,i4,a5')\n", " >>> a[5] = (0.5,10,'abcde')\n", " >>>\n", " >>> fd=TemporaryFile()\n", " >>> a = a.newbyteorder('<')\n", " >>> a.tofile(fd)\n", " >>>\n", " >>> fd.seek(0)\n", " >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,\n", " ... byteorder='<')\n", " >>> print(r[5])\n", " (0.5, 10, 'abcde')\n", " >>> r.shape\n", " (10,)\n", " \"\"\"\n", " \n", " if dtype is None and formats is None:\n", " raise TypeError(\"fromfile() needs a 'dtype' or 'formats' argument\")\n", "\n", " if (shape is None or shape == 0):\n", " shape = (-1,)\n", " elif isinstance(shape, (int, long)):\n", " shape = (shape,)\n", "\n", " if isfileobj(fd):\n", " # file already opened\n", " name = 0\n", " else:\n", " # open file\n", " fd = open(os_fspath(fd), 'rb')\n", " name = 1\n", "\n", " if (offset > 0):\n", " fd.seek(offset, 1)\n", " size = get_remaining_size(fd)\n", "\n", " if dtype is not None:\n", " descr = sb.dtype(dtype)\n", " else:\n", " descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n", "\n", " itemsize = descr.itemsize\n", "\n", " shapeprod = sb.array(shape).prod(dtype=nt.intp)\n", " shapesize = shapeprod * itemsize\n", " if shapesize < 0:\n", " shape = list(shape)\n", " shape[shape.index(-1)] = size // -shapesize\n", " shape = tuple(shape)\n", " shapeprod = sb.array(shape).prod(dtype=nt.intp)\n", "\n", " nbytes = shapeprod * itemsize\n", "\n", " if nbytes > size:\n", " raise ValueError(\n", " \"Not enough bytes left in file for specified shape and type\")\n", "\n", " # create the array\n", " _array = recarray(shape, descr)\n", " nbytesread = fd.readinto(_array.data)\n", " if nbytesread != nbytes:\n", " raise IOError(\"Didn't read as many bytes as expected\")\n", " if name:\n", " fd.close()\n", "\n", " return _array\n", "\n", "def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,\n", " names=None, titles=None, aligned=False, byteorder=None, copy=True):\n", " \"\"\"Construct a record array from a wide-variety of objects.\n", " \"\"\"\n", "\n", " if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and\n", " (formats is None) and (dtype is None)):\n", " raise ValueError(\"Must define formats (or dtype) if object is \"\n", " \"None, string, or an open file\")\n", "\n", " kwds = {}\n", " if dtype is not None:\n", " dtype = sb.dtype(dtype)\n", " elif formats is not None:\n", " dtype = format_parser(formats, names, titles,\n", " aligned, byteorder)._descr\n", " else:\n", " kwds = {'formats': formats,\n", " 'names': names,\n", " 'titles': titles,\n", " 'aligned': aligned,\n", " 'byteorder': byteorder\n", " }\n", "\n", " if obj is None:\n", " if shape is None:\n", " raise ValueError(\"Must define a shape if obj is None\")\n", " return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)\n", "\n", " elif isinstance(obj, bytes):\n", " return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)\n", "\n", " elif isinstance(obj, (list, tuple)):\n", " if isinstance(obj[0], (tuple, list)):\n", " return fromrecords(obj, dtype=dtype, shape=shape, **kwds)\n", " else:\n", " return fromarrays(obj, dtype=dtype, shape=shape, **kwds)\n", "\n", " elif isinstance(obj, recarray):\n", " if dtype is not None and (obj.dtype != dtype):\n", " new = obj.view(dtype)\n", " else:\n", " new = obj\n", " if copy:\n", " new = new.copy()\n", " return new\n", "\n", " elif isfileobj(obj):\n", " return fromfile(obj, dtype=dtype, shape=shape, 
offset=offset)\n", "\n", " elif isinstance(obj, ndarray):\n", " if dtype is not None and (obj.dtype != dtype):\n", " new = obj.view(dtype)\n", " else:\n", " new = obj\n", " if copy:\n", " new = new.copy()\n", " return new.view(recarray)\n", "\n", " else:\n", " interface = getattr(obj, \"__array_interface__\", None)\n", " if interface is None or not isinstance(interface, dict):\n", " raise ValueError(\"Unknown input type\")\n", " obj = sb.array(obj)\n", " if dtype is not None and (obj.dtype != dtype):\n", " obj = obj.view(dtype)\n", " return obj.view(recarray)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085, 0.018867924528301886, 0.0196078431372549, 0.02, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0.012345679012345678, 0, 0, 0.015384615384615385, 0.014705882352941176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.022727272727272728, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0.015384615384615385, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012987012987012988, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
879
0.001373
import numpy as np import sklearn.svm import sklearn.ensemble import sklearn.neighbors import sklearn.decomposition import sklearn.preprocessing import sklearn.neural_network import sklearn.linear_model import sklearn.feature_extraction.text import sklearn.naive_bayes import sklearn.kernel_approximation from hyperopt.pyll import scope, as_apply from hyperopt import hp from .vkmeans import ColumnKMeans """ Sourceed from jaberg/hyperopt-sklearn Additional models added by tadejs """ @scope.define def sklearn_SVC(*args, **kwargs): return sklearn.svm.SVC(*args, **kwargs) @scope.define def sklearn_LinearSVC(*args, **kwargs): return sklearn.svm.LinearSVC(*args, **kwargs) @scope.define def sklearn_KNeighborsClassifier(*args, **kwargs): star_star_kwargs = kwargs.pop('starstar_kwargs') kwargs.update(star_star_kwargs) return sklearn.neighbors.KNeighborsClassifier(*args, **kwargs) @scope.define def sklearn_RandomForestClassifier(*args, **kwargs): return sklearn.ensemble.RandomForestClassifier(*args, **kwargs) @scope.define def sklearn_ExtraTreesClassifier(*args, **kwargs): return sklearn.ensemble.ExtraTreesClassifier(*args, **kwargs) @scope.define def sklearn_RandomForestRegressor(*args, **kwargs): return sklearn.ensemble.RandomForestRegressor(*args, **kwargs) @scope.define def sklearn_ExtraTreesRegressor(*args, **kwargs): return sklearn.ensemble.ExtraTreesRegressor(*args, **kwargs) @scope.define def sklearn_SGDClassifier(*args, **kwargs): return sklearn.linear_model.SGDClassifier(*args, **kwargs) @scope.define def sklearn_SGDRegressor(*args, **kwargs): return sklearn.linear_model.SGDRegressor(*args, **kwargs) @scope.define def sklearn_MultinomialNB(*args, **kwargs): return sklearn.naive_bayes.MultinomialNB(*args, **kwargs) @scope.define def sklearn_PCA(*args, **kwargs): return sklearn.decomposition.PCA(*args, **kwargs) @scope.define def sklearn_Nystrom(*args, **kwargs): return sklearn.kernel_approximation.Nystroem(*args, **kwargs) @scope.define def sklearn_Tfidf(*args, **kwargs): return sklearn.feature_extraction.text.TfidfVectorizer(*args, **kwargs) @scope.define def sklearn_StandardScaler(*args, **kwargs): return sklearn.preprocessing.StandardScaler(*args, **kwargs) @scope.define def sklearn_MinMaxScaler(*args, **kwargs): return sklearn.preprocessing.MinMaxScaler(*args, **kwargs) @scope.define def sklearn_Normalizer(*args, **kwargs): return sklearn.preprocessing.Normalizer(*args, **kwargs) @scope.define def sklearn_OneHotEncoder(*args, **kwargs): return sklearn.preprocessing.OneHotEncoder(*args, **kwargs) @scope.define def sklearn_BernoulliRBM(*args, **kwargs): return sklearn.neural_network.BernoulliRBM(*args, **kwargs) @scope.define def sklearn_ColumnKMeans(*args, **kwargs): return ColumnKMeans(*args, **kwargs) @scope.define def patience_param(x): """ Mark a hyperparameter as having a simple monotonic increasing relationship with both CPU time and the goodness of the model. """ # -- TODO: make this do something! return x @scope.define def inv_patience_param(x): """ Mark a hyperparameter as having a simple monotonic decreasing relationship with both CPU time and the goodness of the model. """ # -- TODO: make this do something! 
return x def hp_bool(name): return hp.choice(name, [False, True]) _svc_default_cache_size = 1000.0 def _svc_gamma(name): # -- making these non-conditional variables # probably helps the GP algorithm generalize gammanz = hp.choice(name + '.gammanz', [0, 1]) gamma = hp.lognormal(name + '.gamma', np.log(0.01), 2.5) return gammanz * gamma def _svc_max_iter(name): return scope.patience_param( scope.int( hp.loguniform( name + '.max_iter', np.log(1e7), np.log(1e9)))) def _svc_C(name): return hp.lognormal(name + '.C', np.log(1000.0), 3.0) def _svc_tol(name): return scope.inv_patience_param( hp.lognormal( name + '.tol', np.log(1e-3), 2.0)) def _random_state(name, random_state): if random_state is None: return hp.randint(name, 5) else: return random_state def svc_linear(name, C=None, shrinking=None, tol=None, max_iter=None, verbose=False, random_state=None, cache_size=_svc_default_cache_size): """ Return a pyll graph with hyperparamters that will construct a sklearn.svm.SVC model with a linear kernel. """ def _name(msg): return '%s.%s_%s' % (name, 'linear', msg) rval = scope.sklearn_SVC( kernel='linear', C=_svc_C(name + '.linear') if C is None else C, shrinking=hp_bool( _name('shrinking')) if shrinking is None else shrinking, tol=_svc_tol(name) if tol is None else tol, max_iter=_svc_max_iter(name) if max_iter is None else max_iter, verbose=verbose, random_state=_random_state(_name('.rstate'), random_state), cache_size=cache_size, ) return rval def svc_rbf(name, C=None, gamma=None, shrinking=None, tol=None, max_iter=None, verbose=False, random_state=None, cache_size=_svc_default_cache_size): """ Return a pyll graph with hyperparamters that will construct a sklearn.svm.SVC model with an RBF kernel. """ def _name(msg): return '%s.%s_%s' % (name, 'rbf', msg) rval = scope.sklearn_SVC( kernel='rbf', C=_svc_C(name + '.rbf') if C is None else C, gamma=_svc_gamma(name) if gamma is None else gamma, shrinking=hp_bool( _name('shrinking')) if shrinking is None else shrinking, tol=_svc_tol(name + '.rbf') if tol is None else tol, max_iter=(_svc_max_iter(name + '.rbf') if max_iter is None else max_iter), verbose=verbose, cache_size=cache_size, random_state=_random_state(_name('rstate'), random_state), ) return rval def svc_poly(name, C=None, gamma=None, coef0=None, degree=None, shrinking=None, tol=None, max_iter=None, verbose=False, random_state=None, cache_size=_svc_default_cache_size): """ Return a pyll graph with hyperparamters that will construct a sklearn.svm.SVC model with an RBF kernel. 
""" def _name(msg): return '%s.%s_%s' % (name, 'poly', msg) # -- (K(x, y) + coef0)^d coef0nz = hp.choice(_name('coef0nz'), [0, 1]) coef0 = hp.uniform(_name('coef0'), 0.0, 1.0) poly_coef0 = coef0nz * coef0 rval = scope.sklearn_SVC( kernel='poly', C=_svc_C(name + '.poly') if C is None else C, gamma=_svc_gamma(name + '.poly') if gamma is None else gamma, coef0=poly_coef0 if coef0 is None else coef0, degree=hp.quniform( _name('degree'), low=1.5, high=8.5, q=1) if degree is None else degree, shrinking=hp_bool( _name('shrinking')) if shrinking is None else shrinking, tol=_svc_tol(name + '.poly') if tol is None else tol, max_iter=(_svc_max_iter(name + '.poly') if max_iter is None else max_iter), verbose=verbose, random_state=_random_state(_name('.rstate'), random_state), cache_size=cache_size, ) return rval def svc_sigmoid(name, C=None, gamma=None, coef0=None, shrinking=None, tol=None, max_iter=None, verbose=False, random_state=None, cache_size=_svc_default_cache_size): """ Return a pyll graph with hyperparamters that will construct a sklearn.svm.SVC model with an RBF kernel. """ def _name(msg): return '%s.%s_%s' % (name, 'sigmoid', msg) # -- tanh(K(x, y) + coef0) coef0nz = hp.choice(_name('coef0nz'), [0, 1]) coef0 = hp.normal(_name('coef0'), 0.0, 1.0) sigm_coef0 = coef0nz * coef0 rval = scope.sklearn_SVC( kernel='sigmoid', C=_svc_C(name + '.sigmoid') if C is None else C, gamma=_svc_gamma(name + '.sigmoid') if gamma is None else gamma, coef0=sigm_coef0 if coef0 is None else coef0, shrinking=hp_bool( _name('shrinking')) if shrinking is None else shrinking, tol=_svc_tol(name + '.sigmoid') if tol is None else tol, max_iter=(_svc_max_iter(name + '.sigmoid') if max_iter is None else max_iter), verbose=verbose, random_state=_random_state(_name('rstate'), random_state), cache_size=cache_size) return rval def svc(name, C=None, kernels=['linear', 'rbf', 'poly', 'sigmoid'], shrinking=None, tol=None, max_iter=None, verbose=False, random_state=None, cache_size=_svc_default_cache_size): svms = { 'linear': svc_linear( name, C=C, shrinking=shrinking, tol=tol, max_iter=max_iter, random_state=random_state, verbose=verbose), 'rbf': svc_rbf( name, C=C, shrinking=shrinking, tol=tol, max_iter=max_iter, random_state=random_state, verbose=verbose), 'poly': svc_poly( name, C=C, shrinking=shrinking, tol=tol, max_iter=max_iter, random_state=random_state, verbose=verbose), 'sigmoid': svc_sigmoid( name, C=C, shrinking=shrinking, tol=tol, max_iter=max_iter, random_state=random_state, verbose=verbose), } choices = [svms[kern] for kern in kernels] if len(choices) == 1: rval = choices[0] else: rval = hp.choice('%s.kernel' % name, choices) return rval # TODO: Some combinations of parameters are not allowed in LinearSVC def liblinear_svc(name, C=None, loss=None, penalty=None, dual=None, tol=None, multi_class=None, fit_intercept=None, intercept_scaling=None, class_weight=None, random_state=None, verbose=False): def _name(msg): return '%s.%s_%s' % (name, 'linear_svc', msg) """ The combination of penalty='l1' and loss='l1' is not supported penalty='l2' and ploss='l1' is only supported when dual='true' penalty='l1' is only supported when dual='false' """ loss_penalty_dual = hp.choice(_name('loss_penalty_dual'), [('l1', 'l2', True), ('l2', 'l2', True), ('l2', 'l1', False), ('l2', 'l2', False)]) rval = scope.sklearn_LinearSVC( C=_svc_C(name + '.liblinear') if C is None else C, loss=loss_penalty_dual[0] if loss is None else loss, penalty=loss_penalty_dual[1] if penalty is None else penalty, dual=loss_penalty_dual[2] if dual is None else 
dual, tol=_svc_tol(name + '.liblinear') if tol is None else tol, multi_class=hp.choice( _name('multi_class'), ['ovr', 'crammer_singer']) if multi_class is None else multi_class, fit_intercept=hp.choice( _name('fit_intercept'), [True, False]) if fit_intercept is None else fit_intercept, random_state=_random_state(_name('rstate'), random_state), verbose=verbose, ) return rval # TODO: Pick reasonable default values def knn(name, sparse_data=False, n_neighbors=None, weights=None, leaf_size=None, metric=None, p=None, **kwargs): def _name(msg): return '%s.%s_%s' % (name, 'knn', msg) if sparse_data: metric_args = { 'metric':'euclidean' } else: metric_args = hp.pchoice(_name('metric'), [ (0.65, { 'metric':'euclidean' }), (0.10, { 'metric':'manhattan' }), (0.10, { 'metric':'chebyshev' }), (0.10, { 'metric':'minkowski', 'p':scope.int(hp.quniform(_name('minkowski_p'), 1, 5, 1))}), (0.05, { 'metric':'wminkowski', 'p':scope.int(hp.quniform(_name('wminkowski_p'), 1, 5, 1)), 'w':hp.uniform(_name('wminkowski_w'), 0, 100) }), ]) rval = scope.sklearn_KNeighborsClassifier( n_neighbors=scope.int(hp.quniform( _name('n_neighbors'), 0.5, 50, 1)) if n_neighbors is None else n_neighbors, weights=hp.choice( _name('weights'), ['uniform', 'distance']) if weights is None else weights, leaf_size=scope.int(hp.quniform( _name('leaf_size'), 0.51, 100, 1)) if leaf_size is None else leaf_size, starstar_kwargs=metric_args ) return rval # TODO: Pick reasonable default values def random_forest(name, n_estimators=None, criterion=None, max_features=None, max_depth=None, min_samples_split=None, min_samples_leaf=None, bootstrap=None, oob_score=None, n_jobs=1, random_state=None, verbose=False): def _name(msg): return '%s.%s_%s' % (name, 'random_forest', msg) """ Out of bag estimation only available if bootstrap=True """ bootstrap_oob = hp.choice(_name('bootstrap_oob'), [(True, True), (True, False), (False, False)]) rval = scope.sklearn_RandomForestClassifier( n_estimators=scope.int(hp.quniform( _name('n_estimators'), 1, 50, 1)) if n_estimators is None else n_estimators, criterion=hp.choice( _name('criterion'), ['gini', 'entropy']) if criterion is None else criterion, max_features=hp.choice( _name('max_features'), ['sqrt', 'log2', None]) if max_features is None else max_features, max_depth=max_depth, min_samples_split=hp.quniform( _name('min_samples_split'), 1, 10, 1) if min_samples_split is None else min_samples_split, min_samples_leaf=hp.quniform( _name('min_samples_leaf'), 1, 5, 1) if min_samples_leaf is None else min_samples_leaf, bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap, oob_score=bootstrap_oob[1] if oob_score is None else oob_score, n_jobs=n_jobs, random_state=_random_state(_name('rstate'), random_state), verbose=verbose, ) return rval # TODO: Pick reasonable default values def random_forest_regressor(name, n_estimators=None, criterion=None, max_features=None, max_depth=None, min_samples_split=None, min_samples_leaf=None, bootstrap=None, oob_score=None, n_jobs=1, random_state=None, verbose=False): def _name(msg): return '%s.%s_%s' % (name, 'random_forest', msg) """ Out of bag estimation only available if bootstrap=True """ bootstrap_oob = hp.choice(_name('bootstrap_oob'), [(True, True), (True, False), (False, False)]) rval = scope.sklearn_RandomForestRegressor( n_estimators=scope.int(hp.quniform( _name('n_estimators'), 1, 50, 1)) if n_estimators is None else n_estimators, criterion=hp.choice( _name('criterion'), ['mse']) if criterion is None else criterion, max_features=hp.choice( _name('max_features'), 
['auto', 'sqrt', 'log2', None]) if max_features is None else max_features, max_depth=max_depth, min_samples_split=hp.quniform( _name('min_samples_split'), 1, 10, 1) if min_samples_split is None else min_samples_split, min_samples_leaf=hp.quniform( _name('min_samples_leaf'), 1, 5, 1) if min_samples_leaf is None else min_samples_leaf, bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap, oob_score=bootstrap_oob[1] if oob_score is None else oob_score, n_jobs=n_jobs, random_state=_random_state(_name('rstate'), random_state), verbose=verbose, ) return rval # TODO: Pick reasonable default values # TODO: the parameters are the same as RandomForest, stick em together somehow def extra_trees(name, n_estimators=None, criterion=None, max_features=None, max_depth=None, min_samples_split=None, min_samples_leaf=None, bootstrap=None, oob_score=None, n_jobs=1, random_state=None, verbose=False): def _name(msg): return '%s.%s_%s' % (name, 'extra_trees', msg) bootstrap_oob = hp.choice(_name('bootstrap_oob'), [(True, True), (True, False), (False, False)]) rval = scope.sklearn_ExtraTreesClassifier( n_estimators=scope.int(hp.quniform( _name('n_estimators'), 1, 50, 1)) if n_estimators is None else n_estimators, criterion=hp.choice( _name('criterion'), ['gini', 'entropy']) if criterion is None else criterion, max_features=hp.choice( _name('max_features'), ['sqrt', 'log2', None]) if max_features is None else max_features, max_depth=max_depth, min_samples_split=hp.quniform( _name('min_samples_split'), 1, 10, 1) if min_samples_split is None else min_samples_split, min_samples_leaf=hp.quniform( _name('min_samples_leaf'), 1, 5, 1) if min_samples_leaf is None else min_samples_leaf, bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap, oob_score=bootstrap_oob[1] if oob_score is None else oob_score, n_jobs=n_jobs, random_state=_random_state(_name('rstate'), random_state), verbose=verbose, ) return rval # TODO: Pick reasonable default values # TODO: the parameters are the same as RandomForest, stick em together somehow def extra_trees_regressor(name, n_estimators=None, criterion=None, max_features=None, max_depth=None, min_samples_split=None, min_samples_leaf=None, bootstrap=None, oob_score=None, n_jobs=1, random_state=None, verbose=False): def _name(msg): return '%s.%s_%s' % (name, 'extra_trees', msg) bootstrap_oob = hp.choice(_name('bootstrap_oob'), [(True, True), (True, False), (False, False)]) rval = scope.sklearn_ExtraTreesRegressor( n_estimators=scope.int(hp.quniform( _name('n_estimators'), 1, 50, 1)) if n_estimators is None else n_estimators, criterion=hp.choice( _name('criterion'), ['mse']) if criterion is None else criterion, max_features=hp.choice( _name('max_features'), ['auto', 'sqrt', 'log2', None]) if max_features is None else max_features, max_depth=max_depth, min_samples_split=hp.quniform( _name('min_samples_split'), 1, 10, 1) if min_samples_split is None else min_samples_split, min_samples_leaf=hp.quniform( _name('min_samples_leaf'), 1, 5, 1) if min_samples_leaf is None else min_samples_leaf, bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap, oob_score=bootstrap_oob[1] if oob_score is None else oob_score, n_jobs=n_jobs, random_state=_random_state(_name('rstate'), random_state), verbose=verbose, ) return rval def sgd(name, loss=None, # default - 'hinge' penalty=None, # default - 'l2' alpha=None, # default - 0.0001 l1_ratio=None, # default - 0.15, must be within [0, 1] fit_intercept=None, # default - True n_iter=None, # default - 5 shuffle=None, # default - False 
random_state=None, # default - None epsilon=None, n_jobs=1, # default - 1 (-1 means all CPUs) learning_rate=None, # default - 'invscaling' eta0=None, # default - 0.01 power_t=None, # default - 0.5 class_weight=None, warm_start=False, verbose=False, ): def _name(msg): return '%s.%s_%s' % (name, 'sgd', msg) rval = scope.sklearn_SGDClassifier( loss=hp.pchoice( _name('loss'), [ #(0.00, 'hinge'), # no probability (0.5, 'log'), (0.5, 'modified_huber'), #(0.00, 'squared_hinge'), # no probability #(0.05, 'perceptron'), #(0.05, 'squared_loss'), #(0.05, 'huber'), #(0.03, 'epsilon_insensitive'), #(0.02, 'squared_epsilon_insensitive'), ]) if loss is None else loss, penalty=hp.pchoice( _name('penalty'), [ (0.60, 'l2'), (0.15, 'l1'), (0.25, 'elasticnet') ]) if penalty is None else penalty, alpha=hp.loguniform( _name('alpha'), np.log(1e-5), np.log(1)) if alpha is None else alpha, l1_ratio=hp.uniform( _name('l1_ratio'), 0, 1) if l1_ratio is None else l1_ratio, fit_intercept=hp.pchoice( _name('fit_intercept'), [ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept, learning_rate='invscaling' if learning_rate is None else learning_rate, eta0=hp.loguniform( _name('eta0'), np.log(1e-5), np.log(1e-1)) if eta0 is None else eta0, power_t=hp.uniform( _name('power_t'), 0, 1) if power_t is None else power_t, n_jobs=n_jobs, verbose=verbose, ) return rval def sgd_regressor(name, loss=None, # default - 'hinge' penalty=None, # default - 'l2' alpha=None, # default - 0.0001 l1_ratio=None, # default - 0.15, must be within [0, 1] fit_intercept=None, # default - True n_iter=None, # default - 5 shuffle=None, # default - False random_state=None, # default - None epsilon=None, # n_jobs=1, #default - 1 (-1 means all CPUs) learning_rate=None, # default - 'invscaling' eta0=None, # default - 0.01 power_t=None, # default - 0.5 # class_weight=None, warm_start=False, verbose=False, ): def _name(msg): return '%s.%s_%s' % (name, 'sgd_regressor', msg) rval = scope.sklearn_SGDRegressor( loss=hp.pchoice( _name('loss'), [ (0.35, 'squared_loss'), (0.35, 'huber'), (0.20, 'epsilon_insensitive'), (0.10, 'squared_epsilon_insensitive') ]) if loss is None else loss, penalty=hp.pchoice( _name('penalty'), [ (0.40, 'l2'), (0.35, 'l1'), (0.25, 'elasticnet') ]) if penalty is None else penalty, alpha=hp.loguniform( _name('alpha'), np.log(1e-6), np.log(1)) if alpha is None else alpha, l1_ratio=hp.uniform( _name('l1_ratio'), 0, 1) if l1_ratio is None else l1_ratio, fit_intercept=hp.pchoice( _name('fit_intercept'), [ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept, learning_rate='invscaling' if learning_rate is None else learning_rate, eta0=hp.loguniform( _name('eta0'), np.log(1e-5), np.log(1e-1)) if eta0 is None else eta0, power_t=hp.uniform( _name('power_t'), 0, 1) if power_t is None else power_t, # n_jobs=n_jobs, verbose=verbose, ) return rval def multinomial_nb(name, alpha=None, fit_prior=None, ): def _name(msg): return '%s.%s_%s' % (name, 'multinomial_nb', msg) rval = scope.sklearn_MultinomialNB( alpha=hp.quniform( _name('alpha'), 0, 1, 0.001) if alpha is None else alpha, fit_prior=hp.choice( _name('fit_prior'), [ True, False ]) if fit_prior is None else fit_prior, ) return rval def any_classifier(name): return hp.choice('%s' % name, [ #svc(name + '.svc'), # knn(name + '.knn'), random_forest(name + '.random_forest', n_jobs=-1), extra_trees(name + '.extra_trees', n_jobs=-1), sgd(name + '.sgd', n_jobs=-1), ]) def any_sparse_classifier(name): return hp.choice('%s' % name, [ #svc(name + '.svc'), sgd(name + 
'.sgd', n_jobs=-1), # knn(name + '.knn', sparse_data=True), multinomial_nb(name + '.multinomial_nb') ]) def any_regressor(name): return hp.choice('%s' % name, [ sgd_regressor(name + '.sgd_regressor'), random_forest_regressor(name + '.random_forest_regressor', n_jobs=-1), extra_trees_regressor(name + '.extra_trees_regressor', n_jobs=-1), ]) def any_sparse_regressor(name): return hp.choice('%s' % name, [ sgd_regressor(name + '.sgd_regressor') ]) def pca(name, n_components=None, whiten=None, max_components=np.Inf, copy=True): rval = scope.sklearn_PCA( # -- qloguniform is missing a "scale" parameter so we # lower the "high" parameter and multiply by 4 out front n_components=4 * scope.int( hp.qloguniform( name + '.n_components', low=np.log(0.51), high=np.log(min(max_components / 4, 30.5)), q=1.0)) if n_components is None else n_components, whiten=hp_bool( name + '.whiten', ) if whiten is None else whiten, copy=copy, ) return rval def nystrom(name, n_components=None, kernel=None, max_components=np.Inf, copy=True): def _name(msg): return '%s.%s_%s' % (name, 'nystrom', msg) rval = scope.sklearn_Nystrom( n_components=4 * scope.int( hp.qloguniform( name + '.n_components', low=np.log(0.51), high=np.log(min(max_components / 4, 30.5)), q=1.0)) if n_components is None else n_components, kernel=hp.pchoice( _name('kernel'), [ (0.35, 'sigmoid'), (0.35, 'rbf'), (0.30, 'poly')]) if kernel is None else kernel, gamma=_svc_gamma('gamma'), coef0=hp.uniform(_name('coef0'), 0.0, 1.0) ) return rval def standard_scaler(name, with_mean=None, with_std=None): rval = scope.sklearn_StandardScaler( with_mean=hp_bool( name + '.with_mean', ) if with_mean is None else with_mean, with_std=hp_bool( name + '.with_std', ) if with_std is None else with_std, ) return rval def tfidf(name, analyzer=None, ngram_range=None, stop_words=None, lowercase=None, max_df=1.0, min_df=1, max_features=None, binary=None, norm=None, use_idf=False, smooth_idf=False, sublinear_tf=False, ): def _name(msg): return '%s.%s_%s' % (name, 'tfidf', msg) max_ngram = scope.int(hp.quniform( _name('max_ngram'), 1, 4, 1)) rval = scope.sklearn_Tfidf( stop_words=hp.choice( _name('stop_words'), [ 'english', None ]) if analyzer is None else analyzer, lowercase=hp_bool( _name('lowercase'), ) if lowercase is None else lowercase, max_df=max_df, min_df=min_df, binary=hp_bool( _name('binary'), ) if binary is None else binary, ngram_range=(1, max_ngram) if ngram_range is None else ngram_range, norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf, ) return rval def min_max_scaler(name, feature_range=None, copy=True): if feature_range is None: feature_range = ( hp.choice(name + '.feature_min', [-1.0, 0.0]), 1.0) rval = scope.sklearn_MinMaxScaler( feature_range=feature_range, copy=copy, ) return rval def normalizer(name, norm=None): rval = scope.sklearn_Normalizer( norm=hp.choice( name + '.with_mean', ['l1', 'l2'], ) if norm is None else norm, ) return rval def one_hot_encoder(name, n_values=None, categorical_features=None, dtype=None): rval = scope.sklearn_OneHotEncoder( n_values='auto' if n_values is None else n_values, categorical_features=('all' if categorical_features is None else categorical_features), dtype=np.float if dtype is None else dtype, ) return rval def rbm(name, n_components=None, learning_rate=None, batch_size=None, n_iter=None, verbose=False, random_state=None): rval = scope.sklearn_BernoulliRBM( n_components=scope.int( hp.qloguniform( name + '.n_components', low=np.log(0.51), high=np.log(999.5), q=1.0)) if n_components is None else 
n_components, learning_rate=hp.lognormal( name + '.learning_rate', np.log(0.01), np.log(10), ) if learning_rate is None else learning_rate, batch_size=scope.int( hp.qloguniform( name + '.batch_size', np.log(1), np.log(100), q=1, )) if batch_size is None else batch_size, n_iter=scope.int( hp.qloguniform( name + '.n_iter', np.log(1), np.log(1000), # -- max sweeps over the *whole* train set q=1, )) if n_iter is None else n_iter, verbose=verbose, random_state=_random_state(name + '.rstate', random_state), ) return rval def colkmeans(name, n_clusters=None, init=None, n_init=None, max_iter=None, tol=None, precompute_distances=True, verbose=0, random_state=None, copy_x=True, n_jobs=1): rval = scope.sklearn_ColumnKMeans( n_clusters=scope.int( hp.qloguniform( name + '.n_clusters', low=np.log(1.51), high=np.log(19.5), q=1.0)) if n_clusters is None else n_clusters, init=hp.choice( name + '.init', ['k-means++', 'random'], ) if init is None else init, n_init=hp.choice( name + '.n_init', [1, 2, 10, 20], ) if n_init is None else n_init, max_iter=scope.int( hp.qlognormal( name + '.max_iter', np.log(300), np.log(10), q=1, )) if max_iter is None else max_iter, tol=hp.lognormal( name + '.tol', np.log(0.0001), np.log(10), ) if tol is None else tol, precompute_distances=precompute_distances, verbose=verbose, random_state=random_state, copy_x=copy_x, n_jobs=n_jobs, ) return rval # XXX: todo GaussianRandomProjection # XXX: todo SparseRandomProjection def any_preprocessing(name, n_features=None, sparse=False): """Generic pre-processing appropriate for a wide variety of data """ choices = [ [nystrom(name + '.nystrom', max_components=n_features)], [standard_scaler(name + '.standard_scaler', with_mean=not sparse)], #[min_max_scaler(name + '.min_max_scaler')], [normalizer(name + '.normalizer')], [], # -- not putting in one-hot because it can make vectors huge # [one_hot_encoder(name + '.one_hot_encoder')], ] if not sparse: choices.append([pca(name + '.pca', max_components=n_features)]), return hp.choice('%s' % name, choices) def any_text_preprocessing(name): """Generic pre-processing appropriate for text data """ return hp.choice('%s' % name, [ [tfidf(name + '.tfidf')], ]) def generic_space(name='space'): model = hp.pchoice('%s' % name, [ (.8, {'preprocessing': [pca(name + '.pca')], 'classifier': any_classifier(name + '.pca_clsf') }), (.2, {'preprocessing': [min_max_scaler(name + '.min_max_scaler')], 'classifier': any_classifier(name + '.min_max_clsf'), }), ]) return as_apply({'model': model}) # -- flake8 eof
[ "import numpy as np\n", "import sklearn.svm\n", "import sklearn.ensemble\n", "import sklearn.neighbors\n", "import sklearn.decomposition\n", "import sklearn.preprocessing\n", "import sklearn.neural_network\n", "import sklearn.linear_model\n", "import sklearn.feature_extraction.text\n", "import sklearn.naive_bayes\n", "import sklearn.kernel_approximation\n", "from hyperopt.pyll import scope, as_apply\n", "from hyperopt import hp\n", "from .vkmeans import ColumnKMeans\n", "\n", "\"\"\"\n", "Sourceed from jaberg/hyperopt-sklearn\n", "Additional models added by tadejs\n", "\"\"\"\n", "\n", "@scope.define\n", "def sklearn_SVC(*args, **kwargs):\n", " return sklearn.svm.SVC(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_LinearSVC(*args, **kwargs):\n", " return sklearn.svm.LinearSVC(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_KNeighborsClassifier(*args, **kwargs):\n", " star_star_kwargs = kwargs.pop('starstar_kwargs')\n", " kwargs.update(star_star_kwargs)\n", " return sklearn.neighbors.KNeighborsClassifier(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_RandomForestClassifier(*args, **kwargs):\n", " return sklearn.ensemble.RandomForestClassifier(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_ExtraTreesClassifier(*args, **kwargs):\n", " return sklearn.ensemble.ExtraTreesClassifier(*args, **kwargs)\n", "\n", "@scope.define\n", "def sklearn_RandomForestRegressor(*args, **kwargs):\n", " return sklearn.ensemble.RandomForestRegressor(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_ExtraTreesRegressor(*args, **kwargs):\n", " return sklearn.ensemble.ExtraTreesRegressor(*args, **kwargs)\n", "\n", "@scope.define\n", "def sklearn_SGDClassifier(*args, **kwargs):\n", " return sklearn.linear_model.SGDClassifier(*args, **kwargs)\n", "\n", "@scope.define\n", "def sklearn_SGDRegressor(*args, **kwargs):\n", " return sklearn.linear_model.SGDRegressor(*args, **kwargs)\n", "\n", "@scope.define\n", "def sklearn_MultinomialNB(*args, **kwargs):\n", " return sklearn.naive_bayes.MultinomialNB(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_PCA(*args, **kwargs):\n", " return sklearn.decomposition.PCA(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_Nystrom(*args, **kwargs):\n", " return sklearn.kernel_approximation.Nystroem(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_Tfidf(*args, **kwargs):\n", " return sklearn.feature_extraction.text.TfidfVectorizer(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_StandardScaler(*args, **kwargs):\n", " return sklearn.preprocessing.StandardScaler(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_MinMaxScaler(*args, **kwargs):\n", " return sklearn.preprocessing.MinMaxScaler(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_Normalizer(*args, **kwargs):\n", " return sklearn.preprocessing.Normalizer(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_OneHotEncoder(*args, **kwargs):\n", " return sklearn.preprocessing.OneHotEncoder(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_BernoulliRBM(*args, **kwargs):\n", " return sklearn.neural_network.BernoulliRBM(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def sklearn_ColumnKMeans(*args, **kwargs):\n", " return ColumnKMeans(*args, **kwargs)\n", "\n", "\n", "@scope.define\n", "def patience_param(x):\n", " \"\"\"\n", " Mark a hyperparameter as having a simple monotonic increasing\n", " relationship with both CPU time and the 
goodness of the model.\n", " \"\"\"\n", " # -- TODO: make this do something!\n", " return x\n", "\n", "\n", "@scope.define\n", "def inv_patience_param(x):\n", " \"\"\"\n", " Mark a hyperparameter as having a simple monotonic decreasing\n", " relationship with both CPU time and the goodness of the model.\n", " \"\"\"\n", " # -- TODO: make this do something!\n", " return x\n", "\n", "\n", "def hp_bool(name):\n", " return hp.choice(name, [False, True])\n", "\n", "\n", "_svc_default_cache_size = 1000.0\n", "\n", "\n", "def _svc_gamma(name):\n", " # -- making these non-conditional variables\n", " # probably helps the GP algorithm generalize\n", " gammanz = hp.choice(name + '.gammanz', [0, 1])\n", " gamma = hp.lognormal(name + '.gamma', np.log(0.01), 2.5)\n", " return gammanz * gamma\n", "\n", "\n", "def _svc_max_iter(name):\n", " return scope.patience_param(\n", " scope.int(\n", " hp.loguniform(\n", " name + '.max_iter',\n", " np.log(1e7),\n", " np.log(1e9))))\n", "\n", "\n", "def _svc_C(name):\n", " return hp.lognormal(name + '.C', np.log(1000.0), 3.0)\n", "\n", "\n", "def _svc_tol(name):\n", " return scope.inv_patience_param(\n", " hp.lognormal(\n", " name + '.tol',\n", " np.log(1e-3),\n", " 2.0))\n", "\n", "def _random_state(name, random_state):\n", " if random_state is None:\n", " return hp.randint(name, 5)\n", " else:\n", " return random_state\n", "\n", "\n", "def svc_linear(name,\n", " C=None,\n", " shrinking=None,\n", " tol=None,\n", " max_iter=None,\n", " verbose=False,\n", " random_state=None,\n", " cache_size=_svc_default_cache_size):\n", " \"\"\"\n", " Return a pyll graph with hyperparamters that will construct\n", " a sklearn.svm.SVC model with a linear kernel.\n", "\n", " \"\"\"\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'linear', msg)\n", "\n", " rval = scope.sklearn_SVC(\n", " kernel='linear',\n", " C=_svc_C(name + '.linear') if C is None else C,\n", " shrinking=hp_bool(\n", " _name('shrinking')) if shrinking is None else shrinking,\n", " tol=_svc_tol(name) if tol is None else tol,\n", " max_iter=_svc_max_iter(name) if max_iter is None else max_iter,\n", " verbose=verbose,\n", " random_state=_random_state(_name('.rstate'), random_state),\n", " cache_size=cache_size,\n", " )\n", " return rval\n", "\n", "\n", "def svc_rbf(name,\n", " C=None,\n", " gamma=None,\n", " shrinking=None,\n", " tol=None,\n", " max_iter=None,\n", " verbose=False,\n", " random_state=None,\n", " cache_size=_svc_default_cache_size):\n", " \"\"\"\n", " Return a pyll graph with hyperparamters that will construct\n", " a sklearn.svm.SVC model with an RBF kernel.\n", "\n", " \"\"\"\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'rbf', msg)\n", "\n", " rval = scope.sklearn_SVC(\n", " kernel='rbf',\n", " C=_svc_C(name + '.rbf') if C is None else C,\n", " gamma=_svc_gamma(name) if gamma is None else gamma,\n", " shrinking=hp_bool(\n", " _name('shrinking')) if shrinking is None else shrinking,\n", " tol=_svc_tol(name + '.rbf') if tol is None else tol,\n", " max_iter=(_svc_max_iter(name + '.rbf')\n", " if max_iter is None else max_iter),\n", " verbose=verbose,\n", " cache_size=cache_size,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " )\n", " return rval\n", "\n", "\n", "def svc_poly(name,\n", " C=None,\n", " gamma=None,\n", " coef0=None,\n", " degree=None,\n", " shrinking=None,\n", " tol=None,\n", " max_iter=None,\n", " verbose=False,\n", " random_state=None,\n", " cache_size=_svc_default_cache_size):\n", " \"\"\"\n", " Return a pyll graph with hyperparamters that will 
construct\n", " a sklearn.svm.SVC model with an RBF kernel.\n", "\n", " \"\"\"\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'poly', msg)\n", "\n", " # -- (K(x, y) + coef0)^d\n", " coef0nz = hp.choice(_name('coef0nz'), [0, 1])\n", " coef0 = hp.uniform(_name('coef0'), 0.0, 1.0)\n", " poly_coef0 = coef0nz * coef0\n", "\n", " rval = scope.sklearn_SVC(\n", " kernel='poly',\n", " C=_svc_C(name + '.poly') if C is None else C,\n", " gamma=_svc_gamma(name + '.poly') if gamma is None else gamma,\n", " coef0=poly_coef0 if coef0 is None else coef0,\n", " degree=hp.quniform(\n", " _name('degree'),\n", " low=1.5,\n", " high=8.5,\n", " q=1) if degree is None else degree,\n", " shrinking=hp_bool(\n", " _name('shrinking')) if shrinking is None else shrinking,\n", " tol=_svc_tol(name + '.poly') if tol is None else tol,\n", " max_iter=(_svc_max_iter(name + '.poly')\n", " if max_iter is None else max_iter),\n", " verbose=verbose,\n", " random_state=_random_state(_name('.rstate'), random_state),\n", " cache_size=cache_size,\n", " )\n", " return rval\n", "\n", "\n", "def svc_sigmoid(name,\n", " C=None,\n", " gamma=None,\n", " coef0=None,\n", " shrinking=None,\n", " tol=None,\n", " max_iter=None,\n", " verbose=False,\n", " random_state=None,\n", " cache_size=_svc_default_cache_size):\n", " \"\"\"\n", " Return a pyll graph with hyperparamters that will construct\n", " a sklearn.svm.SVC model with an RBF kernel.\n", "\n", " \"\"\"\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'sigmoid', msg)\n", "\n", " # -- tanh(K(x, y) + coef0)\n", " coef0nz = hp.choice(_name('coef0nz'), [0, 1])\n", " coef0 = hp.normal(_name('coef0'), 0.0, 1.0)\n", " sigm_coef0 = coef0nz * coef0\n", "\n", " rval = scope.sklearn_SVC(\n", " kernel='sigmoid',\n", " C=_svc_C(name + '.sigmoid') if C is None else C,\n", " gamma=_svc_gamma(name + '.sigmoid') if gamma is None else gamma,\n", " coef0=sigm_coef0 if coef0 is None else coef0,\n", " shrinking=hp_bool(\n", " _name('shrinking')) if shrinking is None else shrinking,\n", " tol=_svc_tol(name + '.sigmoid') if tol is None else tol,\n", " max_iter=(_svc_max_iter(name + '.sigmoid')\n", " if max_iter is None else max_iter),\n", " verbose=verbose,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " cache_size=cache_size)\n", " return rval\n", "\n", "\n", "def svc(name,\n", " C=None,\n", " kernels=['linear', 'rbf', 'poly', 'sigmoid'],\n", " shrinking=None,\n", " tol=None,\n", " max_iter=None,\n", " verbose=False,\n", " random_state=None,\n", " cache_size=_svc_default_cache_size):\n", " svms = {\n", " 'linear': svc_linear(\n", " name,\n", " C=C,\n", " shrinking=shrinking,\n", " tol=tol,\n", " max_iter=max_iter,\n", " random_state=random_state,\n", " verbose=verbose),\n", " 'rbf': svc_rbf(\n", " name,\n", " C=C,\n", " shrinking=shrinking,\n", " tol=tol,\n", " max_iter=max_iter,\n", " random_state=random_state,\n", " verbose=verbose),\n", " 'poly': svc_poly(\n", " name,\n", " C=C,\n", " shrinking=shrinking,\n", " tol=tol,\n", " max_iter=max_iter,\n", " random_state=random_state,\n", " verbose=verbose),\n", " 'sigmoid': svc_sigmoid(\n", " name,\n", " C=C,\n", " shrinking=shrinking,\n", " tol=tol,\n", " max_iter=max_iter,\n", " random_state=random_state,\n", " verbose=verbose),\n", " }\n", " choices = [svms[kern] for kern in kernels]\n", " if len(choices) == 1:\n", " rval = choices[0]\n", " else:\n", " rval = hp.choice('%s.kernel' % name, choices)\n", " return rval\n", "\n", "\n", "# TODO: Some combinations of parameters are not allowed in LinearSVC\n", "def 
liblinear_svc(name,\n", " C=None,\n", " loss=None,\n", " penalty=None,\n", " dual=None,\n", " tol=None,\n", " multi_class=None,\n", " fit_intercept=None,\n", " intercept_scaling=None,\n", " class_weight=None,\n", " random_state=None,\n", " verbose=False):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'linear_svc', msg)\n", "\n", " \"\"\"\n", " The combination of penalty='l1' and loss='l1' is not supported\n", " penalty='l2' and ploss='l1' is only supported when dual='true'\n", " penalty='l1' is only supported when dual='false'\n", " \"\"\"\n", " loss_penalty_dual = hp.choice(_name('loss_penalty_dual'),\n", " [('l1', 'l2', True),\n", " ('l2', 'l2', True),\n", " ('l2', 'l1', False),\n", " ('l2', 'l2', False)])\n", "\n", " rval = scope.sklearn_LinearSVC(\n", " C=_svc_C(name + '.liblinear') if C is None else C,\n", " loss=loss_penalty_dual[0] if loss is None else loss,\n", " penalty=loss_penalty_dual[1] if penalty is None else penalty,\n", " dual=loss_penalty_dual[2] if dual is None else dual,\n", " tol=_svc_tol(name + '.liblinear') if tol is None else tol,\n", " multi_class=hp.choice(\n", " _name('multi_class'),\n", " ['ovr', 'crammer_singer']) if multi_class is None else multi_class,\n", " fit_intercept=hp.choice(\n", " _name('fit_intercept'),\n", " [True, False]) if fit_intercept is None else fit_intercept,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "\n", "# TODO: Pick reasonable default values\n", "def knn(name,\n", " sparse_data=False,\n", " n_neighbors=None,\n", " weights=None,\n", " leaf_size=None,\n", " metric=None,\n", " p=None,\n", " **kwargs):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'knn', msg)\n", "\n", " if sparse_data:\n", " metric_args = { 'metric':'euclidean' }\n", " else:\n", " metric_args = hp.pchoice(_name('metric'), [\n", " (0.65, { 'metric':'euclidean' }),\n", " (0.10, { 'metric':'manhattan' }),\n", " (0.10, { 'metric':'chebyshev' }),\n", " (0.10, { 'metric':'minkowski',\n", " 'p':scope.int(hp.quniform(_name('minkowski_p'), 1, 5, 1))}),\n", " (0.05, { 'metric':'wminkowski',\n", " 'p':scope.int(hp.quniform(_name('wminkowski_p'), 1, 5, 1)),\n", " 'w':hp.uniform(_name('wminkowski_w'), 0, 100) }),\n", " ])\n", "\n", " rval = scope.sklearn_KNeighborsClassifier(\n", " n_neighbors=scope.int(hp.quniform(\n", " _name('n_neighbors'),\n", " 0.5, 50, 1)) if n_neighbors is None else n_neighbors,\n", " weights=hp.choice(\n", " _name('weights'),\n", " ['uniform', 'distance']) if weights is None else weights,\n", " leaf_size=scope.int(hp.quniform(\n", " _name('leaf_size'),\n", " 0.51, 100, 1)) if leaf_size is None else leaf_size,\n", " starstar_kwargs=metric_args\n", " )\n", " return rval\n", "\n", "# TODO: Pick reasonable default values\n", "def random_forest(name,\n", " n_estimators=None,\n", " criterion=None,\n", " max_features=None,\n", " max_depth=None,\n", " min_samples_split=None,\n", " min_samples_leaf=None,\n", " bootstrap=None,\n", " oob_score=None,\n", " n_jobs=1,\n", " random_state=None,\n", " verbose=False):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'random_forest', msg)\n", "\n", " \"\"\"\n", " Out of bag estimation only available if bootstrap=True\n", " \"\"\"\n", "\n", " bootstrap_oob = hp.choice(_name('bootstrap_oob'),\n", " [(True, True),\n", " (True, False),\n", " (False, False)])\n", "\n", " rval = scope.sklearn_RandomForestClassifier(\n", " n_estimators=scope.int(hp.quniform(\n", " _name('n_estimators'),\n", " 1, 50, 1)) if 
n_estimators is None else n_estimators,\n", " criterion=hp.choice(\n", " _name('criterion'),\n", " ['gini', 'entropy']) if criterion is None else criterion,\n", " max_features=hp.choice(\n", " _name('max_features'),\n", " ['sqrt', 'log2',\n", " None]) if max_features is None else max_features,\n", " max_depth=max_depth,\n", " min_samples_split=hp.quniform(\n", " _name('min_samples_split'),\n", " 1, 10, 1) if min_samples_split is None else min_samples_split,\n", " min_samples_leaf=hp.quniform(\n", " _name('min_samples_leaf'),\n", " 1, 5, 1) if min_samples_leaf is None else min_samples_leaf,\n", " bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,\n", " oob_score=bootstrap_oob[1] if oob_score is None else oob_score,\n", " n_jobs=n_jobs,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "\n", "# TODO: Pick reasonable default values\n", "def random_forest_regressor(name,\n", " n_estimators=None,\n", " criterion=None,\n", " max_features=None,\n", " max_depth=None,\n", " min_samples_split=None,\n", " min_samples_leaf=None,\n", " bootstrap=None,\n", " oob_score=None,\n", " n_jobs=1,\n", " random_state=None,\n", " verbose=False):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'random_forest', msg)\n", "\n", " \"\"\"\n", " Out of bag estimation only available if bootstrap=True\n", " \"\"\"\n", "\n", " bootstrap_oob = hp.choice(_name('bootstrap_oob'),\n", " [(True, True),\n", " (True, False),\n", " (False, False)])\n", "\n", " rval = scope.sklearn_RandomForestRegressor(\n", " n_estimators=scope.int(hp.quniform(\n", " _name('n_estimators'),\n", " 1, 50, 1)) if n_estimators is None else n_estimators,\n", " criterion=hp.choice(\n", " _name('criterion'),\n", " ['mse']) if criterion is None else criterion,\n", " max_features=hp.choice(\n", " _name('max_features'),\n", " ['auto', 'sqrt', 'log2',\n", " None]) if max_features is None else max_features,\n", " max_depth=max_depth,\n", " min_samples_split=hp.quniform(\n", " _name('min_samples_split'),\n", " 1, 10, 1) if min_samples_split is None else min_samples_split,\n", " min_samples_leaf=hp.quniform(\n", " _name('min_samples_leaf'),\n", " 1, 5, 1) if min_samples_leaf is None else min_samples_leaf,\n", " bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,\n", " oob_score=bootstrap_oob[1] if oob_score is None else oob_score,\n", " n_jobs=n_jobs,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "\n", "\n", "# TODO: Pick reasonable default values\n", "# TODO: the parameters are the same as RandomForest, stick em together somehow\n", "def extra_trees(name,\n", " n_estimators=None,\n", " criterion=None,\n", " max_features=None,\n", " max_depth=None,\n", " min_samples_split=None,\n", " min_samples_leaf=None,\n", " bootstrap=None,\n", " oob_score=None,\n", " n_jobs=1,\n", " random_state=None,\n", " verbose=False):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'extra_trees', msg)\n", "\n", " bootstrap_oob = hp.choice(_name('bootstrap_oob'),\n", " [(True, True),\n", " (True, False),\n", " (False, False)])\n", "\n", " rval = scope.sklearn_ExtraTreesClassifier(\n", " n_estimators=scope.int(hp.quniform(\n", " _name('n_estimators'),\n", " 1, 50, 1)) if n_estimators is None else n_estimators,\n", " criterion=hp.choice(\n", " _name('criterion'),\n", " ['gini', 'entropy']) if criterion is None else criterion,\n", " max_features=hp.choice(\n", " _name('max_features'),\n", " 
['sqrt', 'log2',\n", " None]) if max_features is None else max_features,\n", " max_depth=max_depth,\n", " min_samples_split=hp.quniform(\n", " _name('min_samples_split'),\n", " 1, 10, 1) if min_samples_split is None else min_samples_split,\n", " min_samples_leaf=hp.quniform(\n", " _name('min_samples_leaf'),\n", " 1, 5, 1) if min_samples_leaf is None else min_samples_leaf,\n", " bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,\n", " oob_score=bootstrap_oob[1] if oob_score is None else oob_score,\n", " n_jobs=n_jobs,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "# TODO: Pick reasonable default values\n", "# TODO: the parameters are the same as RandomForest, stick em together somehow\n", "def extra_trees_regressor(name,\n", " n_estimators=None,\n", " criterion=None,\n", " max_features=None,\n", " max_depth=None,\n", " min_samples_split=None,\n", " min_samples_leaf=None,\n", " bootstrap=None,\n", " oob_score=None,\n", " n_jobs=1,\n", " random_state=None,\n", " verbose=False):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'extra_trees', msg)\n", "\n", " bootstrap_oob = hp.choice(_name('bootstrap_oob'),\n", " [(True, True),\n", " (True, False),\n", " (False, False)])\n", "\n", " rval = scope.sklearn_ExtraTreesRegressor(\n", " n_estimators=scope.int(hp.quniform(\n", " _name('n_estimators'),\n", " 1, 50, 1)) if n_estimators is None else n_estimators,\n", " criterion=hp.choice(\n", " _name('criterion'),\n", " ['mse']) if criterion is None else criterion,\n", " max_features=hp.choice(\n", " _name('max_features'),\n", " ['auto', 'sqrt', 'log2',\n", " None]) if max_features is None else max_features,\n", " max_depth=max_depth,\n", " min_samples_split=hp.quniform(\n", " _name('min_samples_split'),\n", " 1, 10, 1) if min_samples_split is None else min_samples_split,\n", " min_samples_leaf=hp.quniform(\n", " _name('min_samples_leaf'),\n", " 1, 5, 1) if min_samples_leaf is None else min_samples_leaf,\n", " bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,\n", " oob_score=bootstrap_oob[1] if oob_score is None else oob_score,\n", " n_jobs=n_jobs,\n", " random_state=_random_state(_name('rstate'), random_state),\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "def sgd(name,\n", " loss=None, # default - 'hinge'\n", " penalty=None, # default - 'l2'\n", " alpha=None, # default - 0.0001\n", " l1_ratio=None, # default - 0.15, must be within [0, 1]\n", " fit_intercept=None, # default - True\n", " n_iter=None, # default - 5\n", " shuffle=None, # default - False\n", " random_state=None, # default - None\n", " epsilon=None,\n", " n_jobs=1, # default - 1 (-1 means all CPUs)\n", " learning_rate=None, # default - 'invscaling'\n", " eta0=None, # default - 0.01\n", " power_t=None, # default - 0.5\n", " class_weight=None,\n", " warm_start=False,\n", " verbose=False,\n", " ):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'sgd', msg)\n", " \n", " rval = scope.sklearn_SGDClassifier(\n", " loss=hp.pchoice(\n", " _name('loss'),\n", " [ #(0.00, 'hinge'), # no probability\n", " (0.5, 'log'),\n", " (0.5, 'modified_huber'),\n", " #(0.00, 'squared_hinge'), # no probability\n", " #(0.05, 'perceptron'),\n", " #(0.05, 'squared_loss'),\n", " #(0.05, 'huber'),\n", " #(0.03, 'epsilon_insensitive'),\n", " #(0.02, 'squared_epsilon_insensitive'),\n", " ]) if loss is None else loss,\n", " penalty=hp.pchoice(\n", " _name('penalty'),\n", " [ (0.60, 'l2'),\n", " (0.15, 'l1'),\n", " (0.25, 
'elasticnet') ]) if penalty is None else penalty,\n", " alpha=hp.loguniform(\n", " _name('alpha'),\n", " np.log(1e-5),\n", " np.log(1)) if alpha is None else alpha,\n", " l1_ratio=hp.uniform(\n", " _name('l1_ratio'),\n", " 0, 1) if l1_ratio is None else l1_ratio,\n", " fit_intercept=hp.pchoice(\n", " _name('fit_intercept'),\n", " [ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept,\n", " learning_rate='invscaling' if learning_rate is None else learning_rate,\n", " eta0=hp.loguniform(\n", " _name('eta0'),\n", " np.log(1e-5),\n", " np.log(1e-1)) if eta0 is None else eta0,\n", " power_t=hp.uniform(\n", " _name('power_t'),\n", " 0, 1) if power_t is None else power_t,\n", " n_jobs=n_jobs,\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "\n", "def sgd_regressor(name,\n", " loss=None, # default - 'hinge'\n", " penalty=None, # default - 'l2'\n", " alpha=None, # default - 0.0001\n", " l1_ratio=None, # default - 0.15, must be within [0, 1]\n", " fit_intercept=None, # default - True\n", " n_iter=None, # default - 5\n", " shuffle=None, # default - False\n", " random_state=None, # default - None\n", " epsilon=None,\n", " # n_jobs=1, #default - 1 (-1 means all CPUs)\n", " learning_rate=None, # default - 'invscaling'\n", " eta0=None, # default - 0.01\n", " power_t=None, # default - 0.5\n", " # class_weight=None,\n", " warm_start=False,\n", " verbose=False,\n", " ):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'sgd_regressor', msg)\n", " \n", " rval = scope.sklearn_SGDRegressor(\n", " loss=hp.pchoice(\n", " _name('loss'),\n", " [ (0.35, 'squared_loss'),\n", " (0.35, 'huber'),\n", " (0.20, 'epsilon_insensitive'),\n", " (0.10, 'squared_epsilon_insensitive') ]) if loss is None else loss,\n", " penalty=hp.pchoice(\n", " _name('penalty'),\n", " [ (0.40, 'l2'),\n", " (0.35, 'l1'),\n", " (0.25, 'elasticnet') ]) if penalty is None else penalty,\n", " alpha=hp.loguniform(\n", " _name('alpha'),\n", " np.log(1e-6),\n", " np.log(1)) if alpha is None else alpha,\n", " l1_ratio=hp.uniform(\n", " _name('l1_ratio'),\n", " 0, 1) if l1_ratio is None else l1_ratio,\n", " fit_intercept=hp.pchoice(\n", " _name('fit_intercept'),\n", " [ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept,\n", " learning_rate='invscaling' if learning_rate is None else learning_rate,\n", " eta0=hp.loguniform(\n", " _name('eta0'),\n", " np.log(1e-5),\n", " np.log(1e-1)) if eta0 is None else eta0,\n", " power_t=hp.uniform(\n", " _name('power_t'),\n", " 0, 1) if power_t is None else power_t,\n", " # n_jobs=n_jobs,\n", " verbose=verbose,\n", " )\n", " return rval\n", "\n", "\n", "def multinomial_nb(name,\n", " alpha=None,\n", " fit_prior=None,\n", " ):\n", "\n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'multinomial_nb', msg)\n", " \n", "\n", " rval = scope.sklearn_MultinomialNB(\n", " alpha=hp.quniform(\n", " _name('alpha'),\n", " 0, 1, 0.001) if alpha is None else alpha,\n", " fit_prior=hp.choice(\n", " _name('fit_prior'),\n", " [ True, False ]) if fit_prior is None else fit_prior,\n", " )\n", " return rval\n", "\n", "def any_classifier(name):\n", " return hp.choice('%s' % name, [\n", " #svc(name + '.svc'),\n", " # knn(name + '.knn'),\n", " random_forest(name + '.random_forest', n_jobs=-1),\n", " extra_trees(name + '.extra_trees', n_jobs=-1),\n", " sgd(name + '.sgd', n_jobs=-1),\n", " ])\n", "\n", "\n", "def any_sparse_classifier(name):\n", " return hp.choice('%s' % name, [\n", " #svc(name + '.svc'),\n", " sgd(name + '.sgd', n_jobs=-1),\n", " # knn(name + '.knn', 
sparse_data=True),\n", " multinomial_nb(name + '.multinomial_nb')\n", " ])\n", "\n", "def any_regressor(name):\n", " return hp.choice('%s' % name, [\n", " sgd_regressor(name + '.sgd_regressor'),\n", " random_forest_regressor(name + '.random_forest_regressor', n_jobs=-1),\n", " extra_trees_regressor(name + '.extra_trees_regressor', n_jobs=-1),\n", " ])\n", "\n", "def any_sparse_regressor(name):\n", " return hp.choice('%s' % name, [\n", " sgd_regressor(name + '.sgd_regressor')\n", " ])\n", "\n", "\n", "def pca(name, n_components=None, whiten=None, max_components=np.Inf, copy=True):\n", " rval = scope.sklearn_PCA(\n", " # -- qloguniform is missing a \"scale\" parameter so we\n", " # lower the \"high\" parameter and multiply by 4 out front\n", " n_components=4 * scope.int(\n", " hp.qloguniform(\n", " name + '.n_components',\n", " low=np.log(0.51),\n", " high=np.log(min(max_components / 4, 30.5)),\n", " q=1.0)) if n_components is None else n_components,\n", " whiten=hp_bool(\n", " name + '.whiten',\n", " ) if whiten is None else whiten,\n", " copy=copy,\n", " )\n", " return rval\n", "\n", "def nystrom(name, n_components=None, kernel=None, max_components=np.Inf, copy=True):\n", " \n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'nystrom', msg)\n", " \n", " rval = scope.sklearn_Nystrom(\n", " n_components=4 * scope.int(\n", " hp.qloguniform(\n", " name + '.n_components',\n", " low=np.log(0.51),\n", " high=np.log(min(max_components / 4, 30.5)),\n", " q=1.0)) if n_components is None else n_components,\n", " kernel=hp.pchoice(\n", " _name('kernel'),\n", " [ (0.35, 'sigmoid'),\n", " (0.35, 'rbf'),\n", " (0.30, 'poly')]) if kernel is None else kernel,\n", " gamma=_svc_gamma('gamma'),\n", " coef0=hp.uniform(_name('coef0'), 0.0, 1.0)\n", " )\n", " return rval\n", "\n", "def standard_scaler(name, with_mean=None, with_std=None):\n", " rval = scope.sklearn_StandardScaler(\n", " with_mean=hp_bool(\n", " name + '.with_mean',\n", " ) if with_mean is None else with_mean,\n", " with_std=hp_bool(\n", " name + '.with_std',\n", " ) if with_std is None else with_std,\n", " )\n", " return rval\n", "\n", "def tfidf(name,\n", " analyzer=None,\n", " ngram_range=None,\n", " stop_words=None,\n", " lowercase=None,\n", " max_df=1.0,\n", " min_df=1,\n", " max_features=None,\n", " binary=None,\n", " norm=None,\n", " use_idf=False,\n", " smooth_idf=False,\n", " sublinear_tf=False,\n", " ):\n", " \n", " def _name(msg):\n", " return '%s.%s_%s' % (name, 'tfidf', msg)\n", " \n", " max_ngram = scope.int(hp.quniform(\n", " _name('max_ngram'),\n", " 1, 4, 1))\n", "\n", " rval = scope.sklearn_Tfidf(\n", " stop_words=hp.choice(\n", " _name('stop_words'),\n", " [ 'english', None ]) if analyzer is None else analyzer,\n", " lowercase=hp_bool(\n", " _name('lowercase'),\n", " ) if lowercase is None else lowercase,\n", " max_df=max_df,\n", " min_df=min_df,\n", " binary=hp_bool(\n", " _name('binary'),\n", " ) if binary is None else binary,\n", " ngram_range=(1, max_ngram) if ngram_range is None else ngram_range,\n", " norm=norm,\n", " use_idf=use_idf,\n", " smooth_idf=smooth_idf,\n", " sublinear_tf=sublinear_tf,\n", " )\n", " return rval\n", "\n", "def min_max_scaler(name, feature_range=None, copy=True):\n", " if feature_range is None:\n", " feature_range = (\n", " hp.choice(name + '.feature_min', [-1.0, 0.0]),\n", " 1.0)\n", " rval = scope.sklearn_MinMaxScaler(\n", " feature_range=feature_range,\n", " copy=copy,\n", " )\n", " return rval\n", "\n", "\n", "def normalizer(name, norm=None):\n", " rval = scope.sklearn_Normalizer(\n", " 
norm=hp.choice(\n", " name + '.with_mean',\n", " ['l1', 'l2'],\n", " ) if norm is None else norm,\n", " )\n", " return rval\n", "\n", "\n", "def one_hot_encoder(name,\n", " n_values=None,\n", " categorical_features=None,\n", " dtype=None):\n", " rval = scope.sklearn_OneHotEncoder(\n", " n_values='auto' if n_values is None else n_values,\n", " categorical_features=('all'\n", " if categorical_features is None\n", " else categorical_features),\n", " dtype=np.float if dtype is None else dtype,\n", " )\n", " return rval\n", "\n", "\n", "def rbm(name,\n", " n_components=None,\n", " learning_rate=None,\n", " batch_size=None,\n", " n_iter=None,\n", " verbose=False,\n", " random_state=None):\n", " rval = scope.sklearn_BernoulliRBM(\n", " n_components=scope.int(\n", " hp.qloguniform(\n", " name + '.n_components',\n", " low=np.log(0.51),\n", " high=np.log(999.5),\n", " q=1.0)) if n_components is None else n_components,\n", " learning_rate=hp.lognormal(\n", " name + '.learning_rate',\n", " np.log(0.01),\n", " np.log(10),\n", " ) if learning_rate is None else learning_rate,\n", " batch_size=scope.int(\n", " hp.qloguniform(\n", " name + '.batch_size',\n", " np.log(1),\n", " np.log(100),\n", " q=1,\n", " )) if batch_size is None else batch_size,\n", " n_iter=scope.int(\n", " hp.qloguniform(\n", " name + '.n_iter',\n", " np.log(1),\n", " np.log(1000), # -- max sweeps over the *whole* train set\n", " q=1,\n", " )) if n_iter is None else n_iter,\n", " verbose=verbose,\n", " random_state=_random_state(name + '.rstate', random_state),\n", " )\n", " return rval\n", "\n", "\n", "def colkmeans(name,\n", " n_clusters=None,\n", " init=None,\n", " n_init=None,\n", " max_iter=None,\n", " tol=None,\n", " precompute_distances=True,\n", " verbose=0,\n", " random_state=None,\n", " copy_x=True,\n", " n_jobs=1):\n", " rval = scope.sklearn_ColumnKMeans(\n", " n_clusters=scope.int(\n", " hp.qloguniform(\n", " name + '.n_clusters',\n", " low=np.log(1.51),\n", " high=np.log(19.5),\n", " q=1.0)) if n_clusters is None else n_clusters,\n", " init=hp.choice(\n", " name + '.init',\n", " ['k-means++', 'random'],\n", " ) if init is None else init,\n", " n_init=hp.choice(\n", " name + '.n_init',\n", " [1, 2, 10, 20],\n", " ) if n_init is None else n_init,\n", " max_iter=scope.int(\n", " hp.qlognormal(\n", " name + '.max_iter',\n", " np.log(300),\n", " np.log(10),\n", " q=1,\n", " )) if max_iter is None else max_iter,\n", " tol=hp.lognormal(\n", " name + '.tol',\n", " np.log(0.0001),\n", " np.log(10),\n", " ) if tol is None else tol,\n", " precompute_distances=precompute_distances,\n", " verbose=verbose,\n", " random_state=random_state,\n", " copy_x=copy_x,\n", " n_jobs=n_jobs,\n", " )\n", " return rval\n", "\n", "# XXX: todo GaussianRandomProjection\n", "# XXX: todo SparseRandomProjection\n", "\n", "\n", "def any_preprocessing(name, n_features=None, sparse=False):\n", " \"\"\"Generic pre-processing appropriate for a wide variety of data\n", " \"\"\"\n", " choices = [\n", " [nystrom(name + '.nystrom', max_components=n_features)],\n", " [standard_scaler(name + '.standard_scaler', with_mean=not sparse)],\n", " #[min_max_scaler(name + '.min_max_scaler')],\n", " [normalizer(name + '.normalizer')],\n", " [],\n", " # -- not putting in one-hot because it can make vectors huge\n", " # [one_hot_encoder(name + '.one_hot_encoder')],\n", " ] \n", " if not sparse:\n", " choices.append([pca(name + '.pca', max_components=n_features)]),\n", " return hp.choice('%s' % name, choices)\n", "\n", "def any_text_preprocessing(name):\n", " \"\"\"Generic 
pre-processing appropriate for text data\n", " \"\"\"\n", " return hp.choice('%s' % name, [\n", " [tfidf(name + '.tfidf')],\n", " ])\n", "\n", "def generic_space(name='space'):\n", " model = hp.pchoice('%s' % name, [\n", " (.8, {'preprocessing': [pca(name + '.pca')],\n", " 'classifier': any_classifier(name + '.pca_clsf')\n", " }),\n", " (.2, {'preprocessing': [min_max_scaler(name + '.min_max_scaler')],\n", " 'classifier': any_classifier(name + '.min_max_clsf'),\n", " }),\n", " ])\n", " return as_apply({'model': model})\n", "\n", "# -- flake8 eof\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08888888888888889, 0, 0.02, 0.07142857142857142, 0.07142857142857142, 0.07142857142857142, 0.05128205128205128, 0.028169014084507043, 0.05, 0.02857142857142857, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703, 0.029411764705882353, 0.02702702702702703, 0.029411764705882353, 0.023809523809523808, 0.024390243902439025, 0.029411764705882353, 0.029411764705882353, 0.03571428571428571, 0.02702702702702703, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.02857142857142857, 0.03125, 0.02857142857142857, 0.03125, 0.025, 0.02564102564102564, 0.03125, 0.03125, 0.038461538461538464, 0.02857142857142857, 0.03125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.016666666666666666, 0.023809523809523808, 0.03125, 0.02702702702702703, 0.024390243902439025, 0.05555555555555555, 0.02040816326530612, 0.02, 0.030303030303030304, 0.02857142857142857, 0.043478260869565216, 0.045454545454545456, 0.05263157894736842, 0.2857142857142857, 0, 0, 0.022222222222222223, 0.2, 0, 0, 0, 0.04081632653061224, 0, 0, 0.017543859649122806, 0.02702702702702703, 0.02564102564102564, 0.03125, 0.021739130434782608, 0.018518518518518517, 0, 0, 0, 0.03571428571428571, 0, 0.014084507042253521, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03409090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0.027777777777777776, 0.027777777777777776, 0.016666666666666666, 0.023809523809523808, 0.03125, 0.02702702702702703, 0.024390243902439025, 0.05555555555555555, 0.01639344262295082, 0.02, 
0.030303030303030304, 0.02857142857142857, 0.04, 0.045454545454545456, 0.05263157894736842, 0.2857142857142857, 0, 0, 0.01818181818181818, 0.2, 0, 0, 0, 0.02631578947368421, 0, 0, 0.024390243902439025, 0, 0, 0.03571428571428571, 0, 0.014084507042253521, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03409090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.05, 0.2857142857142857, 0, 0, 0.017857142857142856, 0.2, 0, 0.025, 0, 0, 0, 0, 0, 0.030303030303030304, 0, 0, 0, 0.038461538461538464, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0.04, 0, 0, 0.024096385542168676, 0.012658227848101266, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023529411764705882, 0.2, 0, 0.02040816326530612, 0.3333333333333333, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0.034482758620689655, 0.06060606060606061, 0, 0, 0, 0, 0, 0, 0, 0.017241379310344827, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.05263157894736842, 0.045454545454545456, 0.047619047619047616, 0.05, 0.0625, 0.07142857142857142, 0.043478260869565216, 0.058823529411764705, 0.06666666666666667, 0.05263157894736842, 0.045454545454545456, 0.041666666666666664, 0.2857142857142857, 0.2, 0, 0.02127659574468085, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018867924528301886, 0, 0, 0, 0, 0.14285714285714285, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
1,090
0.006866
""" This demo program illustrates how to solve Poisson's equation - div grad u(x, y) = f(x, y) on the unit square with pure Neumann boundary conditions: du/dn(x, y) = -sin(5*x) and source f given by f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02) Since only Neumann conditions are applied, u is only determined up to a constant c by the above equations. An addition constraint is thus required, for instance \int u = 0 This can be accomplished by introducing the constant c as an additional unknown (to be sought in the space of real numbers) and the above constraint. """ # Copyright (C) 2010 Marie E. Rognes # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Modified by Anders Logg 2011 # # First added: 2010-05-10 # Last changed: 2012-11-12 from dolfin import * # Create mesh and define function space mesh = UnitSquareMesh(64, 64) V = FunctionSpace(mesh, "CG", 1) R = FunctionSpace(mesh, "R", 0) W = V * R # Define variational problem (u, c) = TrialFunction(W) (v, d) = TestFunctions(W) f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)") g = Expression("-sin(5*x[0])") a = (inner(grad(u), grad(v)) + c*v + u*d)*dx L = f*v*dx + g*v*ds # Compute solution w = Function(W) solve(a == L, w) (u, c) = w.split() # Plot solution plot(u, interactive=True)
[ "\"\"\"\n", "This demo program illustrates how to solve Poisson's equation\n", "\n", " - div grad u(x, y) = f(x, y)\n", "\n", "on the unit square with pure Neumann boundary conditions:\n", "\n", " du/dn(x, y) = -sin(5*x)\n", "\n", "and source f given by\n", "\n", " f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)\n", "\n", "Since only Neumann conditions are applied, u is only determined up to\n", "a constant c by the above equations. An addition constraint is thus\n", "required, for instance\n", "\n", " \\int u = 0\n", "\n", "This can be accomplished by introducing the constant c as an\n", "additional unknown (to be sought in the space of real numbers)\n", "and the above constraint.\n", "\"\"\"\n", "\n", "# Copyright (C) 2010 Marie E. Rognes\n", "#\n", "# This file is part of DOLFIN.\n", "#\n", "# DOLFIN is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU Lesser General Public License as published by\n", "# the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# DOLFIN is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU Lesser General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU Lesser General Public License\n", "# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.\n", "#\n", "# Modified by Anders Logg 2011\n", "#\n", "# First added: 2010-05-10\n", "# Last changed: 2012-11-12\n", "\n", "from dolfin import *\n", "\n", "# Create mesh and define function space\n", "mesh = UnitSquareMesh(64, 64)\n", "V = FunctionSpace(mesh, \"CG\", 1)\n", "R = FunctionSpace(mesh, \"R\", 0)\n", "W = V * R\n", "\n", "# Define variational problem\n", "(u, c) = TrialFunction(W)\n", "(v, d) = TestFunctions(W)\n", "f = Expression(\"10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)\")\n", "g = Expression(\"-sin(5*x[0])\")\n", "a = (inner(grad(u), grad(v)) + c*v + u*d)*dx\n", "L = f*v*dx + g*v*ds\n", "\n", "# Compute solution\n", "w = Function(W)\n", "solve(a == L, w)\n", "(u, c) = w.split()\n", "\n", "# Plot solution\n", "plot(u, interactive=True)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
69
0.001115
#### DOCUMENTATION GENERATOR ########################################################################## # Keeps the offline documention in synch with the online documentation. # Simply run "python update.py" to generate the latest version. import os, sys; sys.path.insert(0, os.path.join("..")) import codecs import re from pattern.web import URL, Document, strip_javascript, strip_between url = "http://www.clips.ua.ac.be/pages/" #--- HTML TEMPLATE ----------------------------------------------------------------------------------- # Use a simplified HTML template based on the online documentation. template = """ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html> <head> <title>%s</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <link type="text/css" rel="stylesheet" href="../clips.css" /> <style> /* Small fixes because we omit the online layout.css. */ h3 { line-height: 1.3em; } #page { margin-left: auto; margin-right: auto; } #header, #header-inner { height: 175px; } #header { border-bottom: 1px solid #C6D4DD; } table { border-collapse: collapse; } </style> </head> <body class="node-type-page one-sidebar sidebar-right section-pages"> <div id="page"> <div id="page-inner"> <div id="header"><div id="header-inner"></div></div> <div id="content"> <div id="content-inner"> <div class="node node-type-page" <div class="node-inner"> <div class="breadcrumb">View online at: <a href="%s" class="noexternal" target="_blank">%s</a></div> <h1>%s</h1> <!-- Parsed from the online documentation. --> %s </div> </div> </div> </div> </div> </div> </body> </html> """.strip() #--- DOWNLOAD & UPDATE ------------------------------------------------------------------------------- for p in ("-", "-web", "-db", "-en", "-nl", "-search", "-vector", "-graph", "-canvas", "-metrics", "-shell", "stop-words", "mbsp-tags", "-dev"): # We include some useful pages (Penn Treebank tags, stop words) referenced in the documentation. if p.startswith("-"): p = "pattern" + p.rstrip("-") title = p.replace("-", ".") if p == "stop-words": title = "Stop words" if p == "mbsp-tags": title = "Penn Treebank II tag set" # Download the online documentation pages. print "Retrieving", url + p html = URL(url + p).download(cached=False) # Parse the actual documentation, we don't need the website header, footer, navigation, search. html = Document(html) html = html.by_id("content-area") html = html.by_class("node-type-page")[0] html = html.source html = strip_javascript(html) html = strip_between('<div id="navbar">', '/#navbar -->', html) html = strip_between('<div id="sidebar-right">', '/#sidebar-right -->', html) html = strip_between('<div id="footer">', '/#footer -->', html) html = strip_between('<a href="http://twitter.com/share"', '</a>', html) # Link to local pages and images. # Link to online media. html = html.replace('href="/pages/MBSP"', 'href="%sMBSP"' % url) # MBSP docs (online) html = re.sub('href="/pages/(pattern-examples.*?)"', 'href="%s\\1"' % url, html) # examples (online) html = re.sub('href="/pages/(.*?)([#|"])', 'href="\\1.html\\2', html) # pages (offline) html = html.replace('src="/media/', 'src="../g/') # images (offline) html = html.replace('src="/sites/all/themes/clips/g/', 'src="../g/') # images (offline) html = html.replace('href="/media/', 'href="%smedia/' % url.replace("pages/", "")) # downloads (online) # Apply the simplified template + set page titles. html = template % (p, url+p, url+p, title, html) # Generate offline HTML file. 
f = codecs.open(os.path.join("html", "%s.html" % p), "w", encoding="utf-8") f.write(html) f.close() # Create index.html (which simply redirects to pattern.html). f = open("index.html", "w") f.write('<meta http-equiv="refresh" content="0; url=html/pattern.html" />') f.close()
[ "#### DOCUMENTATION GENERATOR ##########################################################################\n", "# Keeps the offline documention in synch with the online documentation.\n", "# Simply run \"python update.py\" to generate the latest version.\n", "\n", "import os, sys; sys.path.insert(0, os.path.join(\"..\"))\n", "import codecs\n", "import re\n", "\n", "from pattern.web import URL, Document, strip_javascript, strip_between\n", "\n", "url = \"http://www.clips.ua.ac.be/pages/\"\n", "\n", "#--- HTML TEMPLATE -----------------------------------------------------------------------------------\n", "# Use a simplified HTML template based on the online documentation.\n", "\n", "template = \"\"\"\n", "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n", "<html>\n", "<head>\n", " <title>%s</title>\n", " <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n", " <link type=\"text/css\" rel=\"stylesheet\" href=\"../clips.css\" />\n", " <style>\n", " /* Small fixes because we omit the online layout.css. */\n", " h3 { line-height: 1.3em; }\n", " #page { margin-left: auto; margin-right: auto; }\n", " #header, #header-inner { height: 175px; }\n", " #header { border-bottom: 1px solid #C6D4DD; }\n", " table { border-collapse: collapse; }\n", " </style>\n", "</head>\n", "<body class=\"node-type-page one-sidebar sidebar-right section-pages\">\n", " <div id=\"page\">\n", " <div id=\"page-inner\">\n", " <div id=\"header\"><div id=\"header-inner\"></div></div>\n", " <div id=\"content\">\n", " <div id=\"content-inner\">\n", " <div class=\"node node-type-page\"\n", " <div class=\"node-inner\">\n", " <div class=\"breadcrumb\">View online at: <a href=\"%s\" class=\"noexternal\" target=\"_blank\">%s</a></div>\n", " <h1>%s</h1>\n", " <!-- Parsed from the online documentation. 
-->\n", " %s\n", " </div>\n", " </div>\n", " </div>\n", " </div>\n", " </div>\n", " </div>\n", "</body>\n", "</html>\n", "\"\"\".strip()\n", "\n", "#--- DOWNLOAD & UPDATE -------------------------------------------------------------------------------\n", "\n", "for p in (\"-\", \"-web\", \"-db\", \"-en\", \"-nl\", \"-search\", \"-vector\", \"-graph\", \"-canvas\", \"-metrics\", \n", " \"-shell\", \"stop-words\", \"mbsp-tags\", \"-dev\"):\n", " # We include some useful pages (Penn Treebank tags, stop words) referenced in the documentation.\n", " if p.startswith(\"-\"):\n", " p = \"pattern\" + p.rstrip(\"-\")\n", " title = p.replace(\"-\", \".\")\n", " if p == \"stop-words\":\n", " title = \"Stop words\"\n", " if p == \"mbsp-tags\":\n", " title = \"Penn Treebank II tag set\"\n", " # Download the online documentation pages.\n", " print \"Retrieving\", url + p\n", " html = URL(url + p).download(cached=False)\n", " # Parse the actual documentation, we don't need the website header, footer, navigation, search.\n", " html = Document(html)\n", " html = html.by_id(\"content-area\")\n", " html = html.by_class(\"node-type-page\")[0]\n", " html = html.source\n", " html = strip_javascript(html)\n", " html = strip_between('<div id=\"navbar\">', '/#navbar -->', html)\n", " html = strip_between('<div id=\"sidebar-right\">', '/#sidebar-right -->', html)\n", " html = strip_between('<div id=\"footer\">', '/#footer -->', html)\n", " html = strip_between('<a href=\"http://twitter.com/share\"', '</a>', html)\n", " # Link to local pages and images.\n", " # Link to online media.\n", " html = html.replace('href=\"/pages/MBSP\"', 'href=\"%sMBSP\"' % url) # MBSP docs (online)\n", " html = re.sub('href=\"/pages/(pattern-examples.*?)\"', 'href=\"%s\\\\1\"' % url, html) # examples (online)\n", " html = re.sub('href=\"/pages/(.*?)([#|\"])', 'href=\"\\\\1.html\\\\2', html) # pages (offline)\n", " html = html.replace('src=\"/media/', 'src=\"../g/') # images (offline)\n", " html = html.replace('src=\"/sites/all/themes/clips/g/', 'src=\"../g/') # images (offline)\n", " html = html.replace('href=\"/media/', 'href=\"%smedia/' % url.replace(\"pages/\", \"\")) # downloads (online)\n", " # Apply the simplified template + set page titles.\n", " html = template % (p, url+p, url+p, title, html)\n", " # Generate offline HTML file.\n", " f = codecs.open(os.path.join(\"html\", \"%s.html\" % p), \"w\", encoding=\"utf-8\")\n", " f.write(html)\n", " f.close()\n", "\n", "# Create index.html (which simply redirects to pattern.html).\n", "f = open(\"index.html\", \"w\")\n", "f.write('<meta http-equiv=\"refresh\" content=\"0; url=html/pattern.html\" />')\n", "f.close()" ]
[ 0.019230769230769232, 0, 0, 0, 0.03636363636363636, 0, 0, 0, 0, 0, 0, 0, 0.019417475728155338, 0, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.019417475728155338, 0, 0.02, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0.009259259259259259, 0.009345794392523364, 0.009523809523809525, 0.009433962264150943, 0.009433962264150943, 0.018518518518518517, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111 ]
97
0.00352
#!/usr/bin/env python import os import requests import subprocess import unittest from dnsdisttests import DNSDistTest @unittest.skipIf('SKIP_PROMETHEUS_TESTS' in os.environ, 'Prometheus tests are disabled') class TestPrometheus(DNSDistTest): _webTimeout = 2.0 _webServerPort = 8083 _webServerBasicAuthPassword = 'secret' _webServerAPIKey = 'apisecret' _config_params = ['_testServerPort', '_webServerPort', '_webServerBasicAuthPassword', '_webServerAPIKey'] _config_template = """ newServer{address="127.0.0.1:%s"} webserver("127.0.0.1:%s") setWebserverConfig({password="%s", apiKey="%s"}) """ def checkPrometheusContentBasic(self, content): for line in content.splitlines(): if line.startswith('# HELP'): tokens = line.split(' ') self.assertGreaterEqual(len(tokens), 4) elif line.startswith('# TYPE'): tokens = line.split(' ') self.assertEquals(len(tokens), 4) self.assertIn(tokens[3], ['counter', 'gauge', 'histogram']) elif not line.startswith('#'): tokens = line.split(' ') self.assertEquals(len(tokens), 2) if not line.startswith('dnsdist_'): raise AssertionError('Expecting prometheus metric to be prefixed by \'dnsdist_\', got: "%s"' % (line)) def checkPrometheusContentPromtool(self, content): output = None try: testcmd = ['promtool', 'check', 'metrics'] process = subprocess.Popen(testcmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) output = process.communicate(input=content) except subprocess.CalledProcessError as exc: raise AssertionError('%s failed (%d): %s' % (testcmd, process.returncode, process.output)) # commented out because promtool returns 3 because of the "_total" suffix warnings #if process.returncode != 0: # raise AssertionError('%s failed (%d): %s' % (testcmd, process.returncode, output)) for line in output[0].splitlines(): if line.endswith(b"should have \"_total\" suffix"): continue raise AssertionError('%s returned an unexpected output. Faulty line is "%s", complete content is "%s"' % (testcmd, line, output)) def testMetrics(self): """ Prometheus: Retrieve metrics """ url = 'http://127.0.0.1:' + str(self._webServerPort) + '/metrics' r = requests.get(url, auth=('whatever', self._webServerBasicAuthPassword), timeout=self._webTimeout) self.assertTrue(r) self.assertEquals(r.status_code, 200) self.checkPrometheusContentBasic(r.text) self.checkPrometheusContentPromtool(r.content)
[ "#!/usr/bin/env python\n", "import os\n", "import requests\n", "import subprocess\n", "import unittest\n", "from dnsdisttests import DNSDistTest\n", "\n", "@unittest.skipIf('SKIP_PROMETHEUS_TESTS' in os.environ, 'Prometheus tests are disabled')\n", "class TestPrometheus(DNSDistTest):\n", "\n", " _webTimeout = 2.0\n", " _webServerPort = 8083\n", " _webServerBasicAuthPassword = 'secret'\n", " _webServerAPIKey = 'apisecret'\n", " _config_params = ['_testServerPort', '_webServerPort', '_webServerBasicAuthPassword', '_webServerAPIKey']\n", " _config_template = \"\"\"\n", " newServer{address=\"127.0.0.1:%s\"}\n", " webserver(\"127.0.0.1:%s\")\n", " setWebserverConfig({password=\"%s\", apiKey=\"%s\"})\n", " \"\"\"\n", "\n", " def checkPrometheusContentBasic(self, content):\n", " for line in content.splitlines():\n", " if line.startswith('# HELP'):\n", " tokens = line.split(' ')\n", " self.assertGreaterEqual(len(tokens), 4)\n", " elif line.startswith('# TYPE'):\n", " tokens = line.split(' ')\n", " self.assertEquals(len(tokens), 4)\n", " self.assertIn(tokens[3], ['counter', 'gauge', 'histogram'])\n", " elif not line.startswith('#'):\n", " tokens = line.split(' ')\n", " self.assertEquals(len(tokens), 2)\n", " if not line.startswith('dnsdist_'):\n", " raise AssertionError('Expecting prometheus metric to be prefixed by \\'dnsdist_\\', got: \"%s\"' % (line))\n", "\n", " def checkPrometheusContentPromtool(self, content):\n", " output = None\n", " try:\n", " testcmd = ['promtool', 'check', 'metrics']\n", " process = subprocess.Popen(testcmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n", " output = process.communicate(input=content)\n", " except subprocess.CalledProcessError as exc:\n", " raise AssertionError('%s failed (%d): %s' % (testcmd, process.returncode, process.output))\n", "\n", " # commented out because promtool returns 3 because of the \"_total\" suffix warnings\n", " #if process.returncode != 0:\n", " # raise AssertionError('%s failed (%d): %s' % (testcmd, process.returncode, output))\n", "\n", " for line in output[0].splitlines():\n", " if line.endswith(b\"should have \\\"_total\\\" suffix\"):\n", " continue\n", " raise AssertionError('%s returned an unexpected output. Faulty line is \"%s\", complete content is \"%s\"' % (testcmd, line, output))\n", "\n", " def testMetrics(self):\n", " \"\"\"\n", " Prometheus: Retrieve metrics\n", " \"\"\"\n", " url = 'http://127.0.0.1:' + str(self._webServerPort) + '/metrics'\n", " r = requests.get(url, auth=('whatever', self._webServerBasicAuthPassword), timeout=self._webTimeout)\n", " self.assertTrue(r)\n", " self.assertEquals(r.status_code, 200)\n", " self.checkPrometheusContentBasic(r.text)\n", " self.checkPrometheusContentPromtool(r.content)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0.02247191011235955, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008130081300813009, 0, 0, 0, 0, 0, 0.0072992700729927005, 0, 0, 0.009708737864077669, 0, 0.01098901098901099, 0.02702702702702703, 0.010638297872340425, 0, 0, 0, 0, 0.007042253521126761, 0, 0, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0, 0 ]
64
0.0019
# -*- coding: UTF-8 -*- # Copyright 2013-2018 Rumma & Ko Ltd # This file is part of Lino Voga. # # Lino Voga is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # Lino Voga is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public # License along with Lino Voga. If not, see # <http://www.gnu.org/licenses/>. """ The main module of :ref:`voga`. .. autosummary:: :toctree: lib """ import os fn = os.path.join(os.path.dirname(__file__), 'setup_info.py') exec(compile(open(fn, "rb").read(), fn, 'exec')) __version__ = SETUP_INFO['version'] intersphinx_urls = dict(docs="http://voga.lino-framework.org") srcref_url = 'https://github.com/lino-framework/voga/blob/master/%s' doc_trees = ['docs']
[ "# -*- coding: UTF-8 -*-\n", "# Copyright 2013-2018 Rumma & Ko Ltd\n", "# This file is part of Lino Voga.\n", "#\n", "# Lino Voga is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU Affero General Public License as\n", "# published by the Free Software Foundation, either version 3 of the\n", "# License, or (at your option) any later version.\n", "#\n", "# Lino Voga is distributed in the hope that it will be useful, but\n", "# WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n", "# Affero General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU Affero General Public\n", "# License along with Lino Voga. If not, see\n", "# <http://www.gnu.org/licenses/>.\n", "\n", "\"\"\"\n", "The main module of :ref:`voga`.\n", "\n", ".. autosummary::\n", " :toctree:\n", "\n", " lib\n", "\n", "\"\"\"\n", "\n", "import os\n", "\n", "fn = os.path.join(os.path.dirname(__file__), 'setup_info.py')\n", "exec(compile(open(fn, \"rb\").read(), fn, 'exec'))\n", "\n", "__version__ = SETUP_INFO['version']\n", "\n", "intersphinx_urls = dict(docs=\"http://voga.lino-framework.org\")\n", "srcref_url = 'https://github.com/lino-framework/voga/blob/master/%s'\n", "doc_trees = ['docs']\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
38
0
#----------------------------------------------------------------------- #Copyright 2013 Centrum Wiskunde & Informatica, Amsterdam # #Author: Daniel M. Pelt #Contact: D.M.Pelt@cwi.nl #Website: http://dmpelt.github.io/pyastratoolbox/ # # #This file is part of the Python interface to the #All Scale Tomographic Reconstruction Antwerp Toolbox ("ASTRA Toolbox"). # #The Python interface to the ASTRA Toolbox is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #The Python interface to the ASTRA Toolbox is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with the Python interface to the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>. # #----------------------------------------------------------------------- import astra import numpy as np vol_geom = astra.create_vol_geom(256, 256) proj_geom = astra.create_proj_geom('parallel', 3.0, 128, np.linspace(0,np.pi,180,False)) import scipy.io P = scipy.io.loadmat('phantom.mat')['phantom256'] # Because the astra.create_sino method does not have support for # all possible algorithm options, we manually create a sinogram phantom_id = astra.data2d.create('-vol', vol_geom, P) sinogram_id = astra.data2d.create('-sino', proj_geom) cfg = astra.astra_dict('FP_CUDA') cfg['VolumeDataId'] = phantom_id cfg['ProjectionDataId'] = sinogram_id # Set up 3 rays per detector element cfg['option'] = {} cfg['option']['DetectorSuperSampling'] = 3 alg_id = astra.algorithm.create(cfg) astra.algorithm.run(alg_id) astra.algorithm.delete(alg_id) astra.data2d.delete(phantom_id) sinogram3 = astra.data2d.get(sinogram_id) import pylab pylab.gray() pylab.figure(1) pylab.imshow(P) pylab.figure(2) pylab.imshow(sinogram3) # Create a reconstruction, also using supersampling rec_id = astra.data2d.create('-vol', vol_geom) cfg = astra.astra_dict('SIRT_CUDA') cfg['ReconstructionDataId'] = rec_id cfg['ProjectionDataId'] = sinogram_id # Set up 3 rays per detector element cfg['option'] = {} cfg['option']['DetectorSuperSampling'] = 3 # There is also an option for supersampling during the backprojection step. # This should be used if your detector pixels are smaller than the voxels. # Set up 2 rays per image pixel dimension, for 4 rays total per image pixel. # cfg['option']['PixelSuperSampling'] = 2 alg_id = astra.algorithm.create(cfg) astra.algorithm.run(alg_id, 150) astra.algorithm.delete(alg_id) rec = astra.data2d.get(rec_id) pylab.figure(3) pylab.imshow(rec) pylab.show()
[ "#-----------------------------------------------------------------------\n", "#Copyright 2013 Centrum Wiskunde & Informatica, Amsterdam\n", "#\n", "#Author: Daniel M. Pelt\n", "#Contact: D.M.Pelt@cwi.nl\n", "#Website: http://dmpelt.github.io/pyastratoolbox/\n", "#\n", "#\n", "#This file is part of the Python interface to the\n", "#All Scale Tomographic Reconstruction Antwerp Toolbox (\"ASTRA Toolbox\").\n", "#\n", "#The Python interface to the ASTRA Toolbox is free software: you can redistribute it and/or modify\n", "#it under the terms of the GNU General Public License as published by\n", "#the Free Software Foundation, either version 3 of the License, or\n", "#(at your option) any later version.\n", "#\n", "#The Python interface to the ASTRA Toolbox is distributed in the hope that it will be useful,\n", "#but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "#GNU General Public License for more details.\n", "#\n", "#You should have received a copy of the GNU General Public License\n", "#along with the Python interface to the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>.\n", "#\n", "#-----------------------------------------------------------------------\n", "\n", "import astra\n", "import numpy as np\n", "\n", "vol_geom = astra.create_vol_geom(256, 256)\n", "proj_geom = astra.create_proj_geom('parallel', 3.0, 128, np.linspace(0,np.pi,180,False))\n", "import scipy.io\n", "P = scipy.io.loadmat('phantom.mat')['phantom256']\n", "\n", "# Because the astra.create_sino method does not have support for\n", "# all possible algorithm options, we manually create a sinogram\n", "phantom_id = astra.data2d.create('-vol', vol_geom, P)\n", "sinogram_id = astra.data2d.create('-sino', proj_geom)\n", "cfg = astra.astra_dict('FP_CUDA')\n", "cfg['VolumeDataId'] = phantom_id\n", "cfg['ProjectionDataId'] = sinogram_id\n", "\n", "# Set up 3 rays per detector element\n", "cfg['option'] = {}\n", "cfg['option']['DetectorSuperSampling'] = 3\n", "\n", "alg_id = astra.algorithm.create(cfg)\n", "astra.algorithm.run(alg_id)\n", "astra.algorithm.delete(alg_id)\n", "astra.data2d.delete(phantom_id)\n", "\n", "sinogram3 = astra.data2d.get(sinogram_id)\n", "\n", "import pylab\n", "pylab.gray()\n", "pylab.figure(1)\n", "pylab.imshow(P)\n", "pylab.figure(2)\n", "pylab.imshow(sinogram3)\n", "\n", "# Create a reconstruction, also using supersampling\n", "rec_id = astra.data2d.create('-vol', vol_geom)\n", "cfg = astra.astra_dict('SIRT_CUDA')\n", "cfg['ReconstructionDataId'] = rec_id\n", "cfg['ProjectionDataId'] = sinogram_id\n", "# Set up 3 rays per detector element\n", "cfg['option'] = {}\n", "cfg['option']['DetectorSuperSampling'] = 3\n", "\n", "# There is also an option for supersampling during the backprojection step.\n", "# This should be used if your detector pixels are smaller than the voxels.\n", "\n", "# Set up 2 rays per image pixel dimension, for 4 rays total per image pixel.\n", "# cfg['option']['PixelSuperSampling'] = 2\n", "\n", "\n", "alg_id = astra.algorithm.create(cfg)\n", "astra.algorithm.run(alg_id, 150)\n", "astra.algorithm.delete(alg_id)\n", "\n", "rec = astra.data2d.get(rec_id)\n", "pylab.figure(3)\n", "pylab.imshow(rec)\n", "pylab.show()\n", "\n" ]
[ 0.0136986301369863, 0.017241379310344827, 0, 0.041666666666666664, 0.038461538461538464, 0.02, 0, 0, 0.02, 0.0136986301369863, 0, 0.020202020202020204, 0.014285714285714285, 0.014925373134328358, 0.02702702702702703, 0, 0.02127659574468085, 0.015625, 0.016129032258064516, 0.021739130434782608, 0, 0.014925373134328358, 0.020202020202020204, 0, 0.0136986301369863, 0, 0, 0, 0, 0, 0.0449438202247191, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
85
0.018226
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. from marionette_driver import By, keys, Wait from firefox_puppeteer.ui_base_lib import UIBaseLib class NavBar(UIBaseLib): """Provides access to the DOM elements contained in the navigation bar as well as the location bar.""" def __init__(self, *args, **kwargs): UIBaseLib.__init__(self, *args, **kwargs) self._locationbar = None @property def back_button(self): """Provides access to the DOM element back button in the navbar. :returns: Reference to the back button. """ return self.marionette.find_element(By.ID, 'back-button') @property def forward_button(self): """Provides access to the DOM element forward button in the navbar. :returns: Reference to the forward button. """ return self.marionette.find_element(By.ID, 'forward-button') @property def home_button(self): """Provides access to the DOM element home button in the navbar. :returns: Reference to the home button element """ return self.marionette.find_element(By.ID, 'home-button') @property def locationbar(self): """Provides access to the DOM elements contained in the locationbar. See the :class:`LocationBar` reference. """ if not self._locationbar: urlbar = self.marionette.find_element(By.ID, 'urlbar') self._locationbar = LocationBar(lambda: self.marionette, self.window, urlbar) return self._locationbar @property def menu_button(self): """Provides access to the DOM element menu button in the navbar. :returns: Reference to the menu button element. """ return self.marionette.find_element(By.ID, 'PanelUI-menu-button') @property def toolbar(self): """The DOM element which represents the navigation toolbar. :returns: Reference to the navigation toolbar. """ return self.element class LocationBar(UIBaseLib): """Provides access to and methods for the DOM elements contained in the locationbar (the text area of the ui that typically displays the current url).""" def __init__(self, *args, **kwargs): UIBaseLib.__init__(self, *args, **kwargs) self._autocomplete_results = None self._identity_popup = None @property def autocomplete_results(self): """Provides access to and methods for the location bar autocomplete results. See the :class:`AutocompleteResults` reference.""" if not self._autocomplete_results: popup = self.marionette.find_element(By.ID, 'PopupAutoCompleteRichResult') self._autocomplete_results = AutocompleteResults(lambda: self.marionette, self.window, popup) return self._autocomplete_results def clear(self): """Clears the contents of the url bar (via the DELETE shortcut).""" self.focus('shortcut') self.urlbar.send_keys(keys.Keys.DELETE) Wait(self.marionette).until(lambda _: self.urlbar.get_attribute('value') == '') def close_context_menu(self): """Closes the Location Bar context menu by a key event.""" # TODO: This method should be implemented via the menu API. self.contextmenu.send_keys(keys.Keys.ESCAPE) @property def connection_icon(self): """ Provides access to the urlbar connection icon. :returns: Reference to the connection icon element. """ return self.marionette.find_element(By.ID, 'connection-icon') @property def contextmenu(self): """Provides access to the urlbar context menu. :returns: Reference to the urlbar context menu. """ # TODO: This method should be implemented via the menu API. 
parent = self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'textbox-input-box'}) return parent.find_element(By.ANON_ATTRIBUTE, {'anonid': 'input-box-contextmenu'}) @property def focused(self): """Checks the focus state of the location bar. :returns: `True` if focused, otherwise `False` """ return self.urlbar.get_attribute('focused') == 'true' @property def identity_icon(self): """ Provides access to the urlbar identity icon. :returns: Reference to the identity icon element. """ return self.marionette.find_element(By.ID, 'identity-icon') def focus(self, event='click'): """Focus the location bar according to the provided event. :param eventt: The event to synthesize in order to focus the urlbar (one of `click` or `shortcut`). """ if event == 'click': self.urlbar.click() elif event == 'shortcut': cmd_key = self.window.get_entity('openCmd.commandkey') self.window.send_shortcut(cmd_key, accel=True) else: raise ValueError("An unknown event type was passed: %s" % event) Wait(self.marionette).until(lambda _: self.focused) def get_contextmenu_entry(self, action): """Retrieves the urlbar context menu entry corresponding to the given action. :param action: The action corresponding to the retrieved value. :returns: Reference to the urlbar contextmenu entry. """ # TODO: This method should be implemented via the menu API. entries = self.contextmenu.find_elements(By.CSS_SELECTOR, 'menuitem') filter_on = 'cmd_%s' % action found = [e for e in entries if e.get_attribute('cmd') == filter_on] return found[0] if len(found) else None @property def history_drop_marker(self): """Provides access to the history drop marker. :returns: Reference to the history drop marker. """ return self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'historydropmarker'}) @property def identity_box(self): """The DOM element which represents the identity box. :returns: Reference to the identity box. """ return self.marionette.find_element(By.ID, 'identity-box') @property def identity_country_label(self): """The DOM element which represents the identity icon country label. :returns: Reference to the identity icon country label. """ return self.marionette.find_element(By.ID, 'identity-icon-country-label') @property def identity_organization_label(self): """The DOM element which represents the identity icon label. :returns: Reference to the identity icon label. """ return self.marionette.find_element(By.ID, 'identity-icon-label') @property def identity_popup(self): """Provides utility members for accessing and manipulating the identity popup. See the :class:`IdentityPopup` reference. """ if not self._identity_popup: popup = self.marionette.find_element(By.ID, 'identity-popup') self._identity_popup = IdentityPopup(lambda: self.marionette, self.window, popup) return self._identity_popup def load_url(self, url): """Load the specified url in the location bar by synthesized keystrokes. :param url: The url to load. """ self.clear() self.focus('shortcut') self.urlbar.send_keys(url + keys.Keys.ENTER) @property def notification_popup(self): """Provides access to the DOM element notification popup. :returns: Reference to the notification popup. """ return self.marionette.find_element(By.ID, "notification-popup") def open_identity_popup(self): self.identity_box.click() Wait(self.marionette).until(lambda _: self.identity_popup.is_open) @property def reload_button(self): """Provides access to the DOM element reload button. :returns: Reference to the reload button. 
""" return self.marionette.find_element(By.ID, 'urlbar-reload-button') def reload_url(self, trigger='button', force=False): """Reload the currently open page. :param trigger: The event type to use to cause the reload (one of `shortcut`, `shortcut2`, or `button`). :param force: Whether to cause a forced reload. """ # TODO: The force parameter is ignored for the moment. Use # mouse event modifiers or actions when they're ready. # Bug 1097705 tracks this feature in marionette. if trigger == 'button': self.reload_button.click() elif trigger == 'shortcut': cmd_key = self.window.get_entity('reloadCmd.commandkey') self.window.send_shortcut(cmd_key) elif trigger == 'shortcut2': self.window.send_shortcut(keys.Keys.F5) @property def stop_button(self): """Provides access to the DOM element stop button. :returns: Reference to the stop button. """ return self.marionette.find_element(By.ID, 'urlbar-stop-button') @property def urlbar(self): """Provides access to the DOM element urlbar. :returns: Reference to the url bar. """ return self.marionette.find_element(By.ID, 'urlbar') @property def urlbar_input(self): """Provides access to the urlbar input element. :returns: Reference to the urlbar input. """ return self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'input'}) @property def value(self): """Provides access to the currently displayed value of the urlbar. :returns: The urlbar value. """ return self.urlbar.get_attribute('value') class AutocompleteResults(UIBaseLib): """Wraps DOM elements and methods for interacting with autocomplete results.""" def close(self, force=False): """Closes the urlbar autocomplete popup. :param force: If true, the popup is closed by its own hide function, otherwise a key event is sent to close the popup. """ if not self.is_open: return if force: self.marionette.execute_script(""" arguments[0].hidePopup(); """, script_args=[self.element]) else: self.element.send_keys(keys.Keys.ESCAPE) Wait(self.marionette).until(lambda _: not self.is_open) def get_matching_text(self, result, match_type): """Returns an array of strings of the matching text within an autocomplete result in the urlbar. :param result: The result to inspect for matches. :param match_type: The type of match to search for (one of `title` or `url`). """ if match_type == 'title': descnode = self.marionette.execute_script(""" return arguments[0].boxObject.firstChild.childNodes[1].childNodes[0]; """, script_args=[result]) elif match_type == 'url': descnode = self.marionette.execute_script(""" return arguments[0].boxObject.lastChild.childNodes[2].childNodes[0]; """, script_args=[result]) else: raise ValueError('match_type provided must be one of' '"title" or "url", not %s' % match_type) return self.marionette.execute_script(""" let rv = []; for (let node of arguments[0].childNodes) { if (node.nodeName == 'span') { rv.push(node.innerHTML); } } return rv; """, script_args=[descnode]) @property def visible_results(self): """Supplies the list of visible autocomplete result nodes. :returns: The list of visible results. """ return self.marionette.execute_script(""" let rv = []; let node = arguments[0]; for (let i = 0; i < node.itemCount; ++i) { let item = node.getItemAtIndex(i); if (!item.hasAttribute("collapsed")) { rv.push(item); } } return rv; """, script_args=[self.results]) @property def is_open(self): """Returns whether this popup is currently open. :returns: True when the popup is open, otherwise false. 
""" return self.element.get_attribute('state') == 'open' @property def is_complete(self): """Returns when this popup is open and autocomplete results are complete. :returns: True, when autocomplete results have been populated. """ return self.marionette.execute_script(""" Components.utils.import("resource://gre/modules/Services.jsm"); let win = Services.focus.activeWindow; if (win) { return win.gURLBar.controller.searchStatus >= Components.interfaces.nsIAutoCompleteController.STATUS_COMPLETE_NO_MATCH; } return null; """) @property def results(self): """ :returns: The autocomplete result container node. """ return self.element.find_element(By.ANON_ATTRIBUTE, {'anonid': 'richlistbox'}) @property def selected_index(self): """Provides the index of the selected item in the autocomplete list. :returns: The index. """ return self.results.get_attribute('selectedIndex') class IdentityPopup(UIBaseLib): """Wraps DOM elements and methods for interacting with the identity popup.""" def __init__(self, *args, **kwargs): UIBaseLib.__init__(self, *args, **kwargs) self._view = None @property def host(self): """The DOM element which represents the identity-popup content host. :returns: Reference to the identity-popup content host. """ return self.marionette.find_element(By.ID, 'identity-popup-content-host') @property def is_open(self): """Returns whether this popup is currently open. :returns: True when the popup is open, otherwise false. """ return self.element.get_attribute('state') == 'open' def close(self, force=False): """Closes the identity popup by hitting the escape key. :param force: Optional, If `True` force close the popup. Defaults to `False` """ if not self.is_open: return if force: self.marionette.execute_script(""" arguments[0].hidePopup(); """, script_args=[self.element]) else: self.element.send_keys(keys.Keys.ESCAPE) Wait(self.marionette).until(lambda _: not self.is_open) @property def view(self): """Provides utility members for accessing and manipulating the identity popup's multi view. See the :class:`IdentityPopupMultiView` reference. """ if not self._view: view = self.marionette.find_element(By.ID, 'identity-popup-multiView') self._view = IdentityPopupMultiView(lambda: self.marionette, self.window, view) return self._view class IdentityPopupMultiView(UIBaseLib): def _create_view_for_id(self, view_id): """Creates an instance of :class:`IdentityPopupView` for the specified view id. :param view_id: The ID of the view to create an instance of. :returns: :class:`IdentityPopupView` instance """ mapping = {'identity-popup-mainView': IdentityPopupMainView, 'identity-popup-securityView': IdentityPopupSecurityView, } view = self.marionette.find_element(By.ID, view_id) return mapping.get(view_id, IdentityPopupView)(lambda: self.marionette, self.window, view) @property def main(self): """The DOM element which represents the main view. :returns: Reference to the main view. """ return self._create_view_for_id('identity-popup-mainView') @property def security(self): """The DOM element which represents the security view. :returns: Reference to the security view. """ return self._create_view_for_id('identity-popup-securityView') class IdentityPopupView(UIBaseLib): @property def selected(self): """Checks if the view is selected. :return: `True` if the view is selected. """ return self.element.get_attribute('current') == 'true' class IdentityPopupMainView(IdentityPopupView): @property def selected(self): """Checks if the view is selected. :return: `True` if the view is selected. 
""" return self.marionette.execute_script(""" return arguments[0].panelMultiView.getAttribute('viewtype') == 'main'; """, script_args=[self.element]) @property def expander(self): """The DOM element which represents the expander button for the security content. :returns: Reference to the identity popup expander button. """ return self.element.find_element(By.CLASS_NAME, 'identity-popup-expander') @property def insecure_connection_label(self): """The DOM element which represents the identity popup insecure connection label. :returns: Reference to the identity-popup insecure connection label. """ return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-not-secure') @property def internal_connection_label(self): """The DOM element which represents the identity popup internal connection label. :returns: Reference to the identity-popup internal connection label. """ return self.element.find_element(By.CSS_SELECTOR, 'description[when-connection=chrome]') @property def permissions(self): """The DOM element which represents the identity-popup permissions content. :returns: Reference to the identity-popup permissions. """ return self.element.find_element(By.ID, 'identity-popup-permissions-content') @property def secure_connection_label(self): """The DOM element which represents the identity popup secure connection label. :returns: Reference to the identity-popup secure connection label. """ return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-secure') class IdentityPopupSecurityView(IdentityPopupView): @property def disable_mixed_content_blocking_button(self): """The DOM element which represents the disable mixed content blocking button. :returns: Reference to the disable mixed content blocking button. """ return self.element.find_element(By.CSS_SELECTOR, 'button[when-mixedcontent=active-blocked]') @property def enable_mixed_content_blocking_button(self): """The DOM element which represents the enable mixed content blocking button. :returns: Reference to the enable mixed content blocking button. """ return self.element.find_element(By.CSS_SELECTOR, 'button[when-mixedcontent=active-loaded]') @property def insecure_connection_label(self): """The DOM element which represents the identity popup insecure connection label. :returns: Reference to the identity-popup insecure connection label. """ return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-not-secure') @property def more_info_button(self): """The DOM element which represents the identity-popup more info button. :returns: Reference to the identity-popup more info button. """ label = self.window.get_entity('identity.moreInfoLinkText2') return self.element.find_element(By.CSS_SELECTOR, u'button[label="{}"]'.format(label)) @property def owner(self): """The DOM element which represents the identity-popup content owner. :returns: Reference to the identity-popup content owner. """ return self.element.find_element(By.ID, 'identity-popup-content-owner') @property def owner_location(self): """The DOM element which represents the identity-popup content supplemental. :returns: Reference to the identity-popup content supplemental. """ return self.element.find_element(By.ID, 'identity-popup-content-supplemental') @property def secure_connection_label(self): """The DOM element which represents the identity popup secure connection label. :returns: Reference to the identity-popup secure connection label. 
""" return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-secure') @property def verifier(self): """The DOM element which represents the identity-popup content verifier. :returns: Reference to the identity-popup content verifier. """ return self.element.find_element(By.ID, 'identity-popup-content-verifier')
[ "# This Source Code Form is subject to the terms of the Mozilla Public\n", "# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n", "# You can obtain one at http://mozilla.org/MPL/2.0/.\n", "\n", "from marionette_driver import By, keys, Wait\n", "\n", "from firefox_puppeteer.ui_base_lib import UIBaseLib\n", "\n", "\n", "class NavBar(UIBaseLib):\n", " \"\"\"Provides access to the DOM elements contained in the\n", " navigation bar as well as the location bar.\"\"\"\n", "\n", " def __init__(self, *args, **kwargs):\n", " UIBaseLib.__init__(self, *args, **kwargs)\n", "\n", " self._locationbar = None\n", "\n", " @property\n", " def back_button(self):\n", " \"\"\"Provides access to the DOM element back button in the navbar.\n", "\n", " :returns: Reference to the back button.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'back-button')\n", "\n", " @property\n", " def forward_button(self):\n", " \"\"\"Provides access to the DOM element forward button in the navbar.\n", "\n", " :returns: Reference to the forward button.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'forward-button')\n", "\n", " @property\n", " def home_button(self):\n", " \"\"\"Provides access to the DOM element home button in the navbar.\n", "\n", " :returns: Reference to the home button element\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'home-button')\n", "\n", " @property\n", " def locationbar(self):\n", " \"\"\"Provides access to the DOM elements contained in the\n", " locationbar.\n", "\n", " See the :class:`LocationBar` reference.\n", " \"\"\"\n", " if not self._locationbar:\n", " urlbar = self.marionette.find_element(By.ID, 'urlbar')\n", " self._locationbar = LocationBar(lambda: self.marionette, self.window, urlbar)\n", "\n", " return self._locationbar\n", "\n", " @property\n", " def menu_button(self):\n", " \"\"\"Provides access to the DOM element menu button in the navbar.\n", "\n", " :returns: Reference to the menu button element.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'PanelUI-menu-button')\n", "\n", " @property\n", " def toolbar(self):\n", " \"\"\"The DOM element which represents the navigation toolbar.\n", "\n", " :returns: Reference to the navigation toolbar.\n", " \"\"\"\n", " return self.element\n", "\n", "\n", "class LocationBar(UIBaseLib):\n", " \"\"\"Provides access to and methods for the DOM elements contained in the\n", " locationbar (the text area of the ui that typically displays the current url).\"\"\"\n", "\n", " def __init__(self, *args, **kwargs):\n", " UIBaseLib.__init__(self, *args, **kwargs)\n", "\n", " self._autocomplete_results = None\n", " self._identity_popup = None\n", "\n", " @property\n", " def autocomplete_results(self):\n", " \"\"\"Provides access to and methods for the location bar\n", " autocomplete results.\n", "\n", " See the :class:`AutocompleteResults` reference.\"\"\"\n", " if not self._autocomplete_results:\n", " popup = self.marionette.find_element(By.ID, 'PopupAutoCompleteRichResult')\n", " self._autocomplete_results = AutocompleteResults(lambda: self.marionette,\n", " self.window, popup)\n", "\n", " return self._autocomplete_results\n", "\n", " def clear(self):\n", " \"\"\"Clears the contents of the url bar (via the DELETE shortcut).\"\"\"\n", " self.focus('shortcut')\n", " self.urlbar.send_keys(keys.Keys.DELETE)\n", " Wait(self.marionette).until(lambda _: self.urlbar.get_attribute('value') == '')\n", "\n", " def close_context_menu(self):\n", " \"\"\"Closes the Location Bar context 
menu by a key event.\"\"\"\n", " # TODO: This method should be implemented via the menu API.\n", " self.contextmenu.send_keys(keys.Keys.ESCAPE)\n", "\n", " @property\n", " def connection_icon(self):\n", " \"\"\" Provides access to the urlbar connection icon.\n", "\n", " :returns: Reference to the connection icon element.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'connection-icon')\n", "\n", " @property\n", " def contextmenu(self):\n", " \"\"\"Provides access to the urlbar context menu.\n", "\n", " :returns: Reference to the urlbar context menu.\n", " \"\"\"\n", " # TODO: This method should be implemented via the menu API.\n", " parent = self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'textbox-input-box'})\n", " return parent.find_element(By.ANON_ATTRIBUTE, {'anonid': 'input-box-contextmenu'})\n", "\n", " @property\n", " def focused(self):\n", " \"\"\"Checks the focus state of the location bar.\n", "\n", " :returns: `True` if focused, otherwise `False`\n", " \"\"\"\n", " return self.urlbar.get_attribute('focused') == 'true'\n", "\n", " @property\n", " def identity_icon(self):\n", " \"\"\" Provides access to the urlbar identity icon.\n", "\n", " :returns: Reference to the identity icon element.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'identity-icon')\n", "\n", " def focus(self, event='click'):\n", " \"\"\"Focus the location bar according to the provided event.\n", "\n", " :param eventt: The event to synthesize in order to focus the urlbar\n", " (one of `click` or `shortcut`).\n", " \"\"\"\n", " if event == 'click':\n", " self.urlbar.click()\n", " elif event == 'shortcut':\n", " cmd_key = self.window.get_entity('openCmd.commandkey')\n", " self.window.send_shortcut(cmd_key, accel=True)\n", " else:\n", " raise ValueError(\"An unknown event type was passed: %s\" % event)\n", "\n", " Wait(self.marionette).until(lambda _: self.focused)\n", "\n", " def get_contextmenu_entry(self, action):\n", " \"\"\"Retrieves the urlbar context menu entry corresponding\n", " to the given action.\n", "\n", " :param action: The action corresponding to the retrieved value.\n", " :returns: Reference to the urlbar contextmenu entry.\n", " \"\"\"\n", " # TODO: This method should be implemented via the menu API.\n", " entries = self.contextmenu.find_elements(By.CSS_SELECTOR, 'menuitem')\n", " filter_on = 'cmd_%s' % action\n", " found = [e for e in entries if e.get_attribute('cmd') == filter_on]\n", " return found[0] if len(found) else None\n", "\n", " @property\n", " def history_drop_marker(self):\n", " \"\"\"Provides access to the history drop marker.\n", "\n", " :returns: Reference to the history drop marker.\n", " \"\"\"\n", " return self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'historydropmarker'})\n", "\n", " @property\n", " def identity_box(self):\n", " \"\"\"The DOM element which represents the identity box.\n", "\n", " :returns: Reference to the identity box.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'identity-box')\n", "\n", " @property\n", " def identity_country_label(self):\n", " \"\"\"The DOM element which represents the identity icon country label.\n", "\n", " :returns: Reference to the identity icon country label.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'identity-icon-country-label')\n", "\n", " @property\n", " def identity_organization_label(self):\n", " \"\"\"The DOM element which represents the identity icon label.\n", "\n", " :returns: Reference to the identity icon label.\n", " \"\"\"\n", " return 
self.marionette.find_element(By.ID, 'identity-icon-label')\n", "\n", " @property\n", " def identity_popup(self):\n", " \"\"\"Provides utility members for accessing and manipulating the\n", " identity popup.\n", "\n", " See the :class:`IdentityPopup` reference.\n", " \"\"\"\n", " if not self._identity_popup:\n", " popup = self.marionette.find_element(By.ID, 'identity-popup')\n", " self._identity_popup = IdentityPopup(lambda: self.marionette,\n", " self.window, popup)\n", "\n", " return self._identity_popup\n", "\n", " def load_url(self, url):\n", " \"\"\"Load the specified url in the location bar by synthesized\n", " keystrokes.\n", "\n", " :param url: The url to load.\n", " \"\"\"\n", " self.clear()\n", " self.focus('shortcut')\n", " self.urlbar.send_keys(url + keys.Keys.ENTER)\n", "\n", " @property\n", " def notification_popup(self):\n", " \"\"\"Provides access to the DOM element notification popup.\n", "\n", " :returns: Reference to the notification popup.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, \"notification-popup\")\n", "\n", " def open_identity_popup(self):\n", " self.identity_box.click()\n", " Wait(self.marionette).until(lambda _: self.identity_popup.is_open)\n", "\n", " @property\n", " def reload_button(self):\n", " \"\"\"Provides access to the DOM element reload button.\n", "\n", " :returns: Reference to the reload button.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'urlbar-reload-button')\n", "\n", " def reload_url(self, trigger='button', force=False):\n", " \"\"\"Reload the currently open page.\n", "\n", " :param trigger: The event type to use to cause the reload (one of\n", " `shortcut`, `shortcut2`, or `button`).\n", " :param force: Whether to cause a forced reload.\n", " \"\"\"\n", " # TODO: The force parameter is ignored for the moment. 
Use\n", " # mouse event modifiers or actions when they're ready.\n", " # Bug 1097705 tracks this feature in marionette.\n", " if trigger == 'button':\n", " self.reload_button.click()\n", " elif trigger == 'shortcut':\n", " cmd_key = self.window.get_entity('reloadCmd.commandkey')\n", " self.window.send_shortcut(cmd_key)\n", " elif trigger == 'shortcut2':\n", " self.window.send_shortcut(keys.Keys.F5)\n", "\n", " @property\n", " def stop_button(self):\n", " \"\"\"Provides access to the DOM element stop button.\n", "\n", " :returns: Reference to the stop button.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'urlbar-stop-button')\n", "\n", " @property\n", " def urlbar(self):\n", " \"\"\"Provides access to the DOM element urlbar.\n", "\n", " :returns: Reference to the url bar.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'urlbar')\n", "\n", " @property\n", " def urlbar_input(self):\n", " \"\"\"Provides access to the urlbar input element.\n", "\n", " :returns: Reference to the urlbar input.\n", " \"\"\"\n", " return self.urlbar.find_element(By.ANON_ATTRIBUTE, {'anonid': 'input'})\n", "\n", " @property\n", " def value(self):\n", " \"\"\"Provides access to the currently displayed value of the urlbar.\n", "\n", " :returns: The urlbar value.\n", " \"\"\"\n", " return self.urlbar.get_attribute('value')\n", "\n", "\n", "class AutocompleteResults(UIBaseLib):\n", " \"\"\"Wraps DOM elements and methods for interacting with autocomplete results.\"\"\"\n", "\n", " def close(self, force=False):\n", " \"\"\"Closes the urlbar autocomplete popup.\n", "\n", " :param force: If true, the popup is closed by its own hide function,\n", " otherwise a key event is sent to close the popup.\n", " \"\"\"\n", " if not self.is_open:\n", " return\n", "\n", " if force:\n", " self.marionette.execute_script(\"\"\"\n", " arguments[0].hidePopup();\n", " \"\"\", script_args=[self.element])\n", " else:\n", " self.element.send_keys(keys.Keys.ESCAPE)\n", "\n", " Wait(self.marionette).until(lambda _: not self.is_open)\n", "\n", " def get_matching_text(self, result, match_type):\n", " \"\"\"Returns an array of strings of the matching text within an autocomplete\n", " result in the urlbar.\n", "\n", " :param result: The result to inspect for matches.\n", " :param match_type: The type of match to search for (one of `title` or `url`).\n", " \"\"\"\n", "\n", " if match_type == 'title':\n", " descnode = self.marionette.execute_script(\"\"\"\n", " return arguments[0].boxObject.firstChild.childNodes[1].childNodes[0];\n", " \"\"\", script_args=[result])\n", " elif match_type == 'url':\n", " descnode = self.marionette.execute_script(\"\"\"\n", " return arguments[0].boxObject.lastChild.childNodes[2].childNodes[0];\n", " \"\"\", script_args=[result])\n", " else:\n", " raise ValueError('match_type provided must be one of'\n", " '\"title\" or \"url\", not %s' % match_type)\n", "\n", " return self.marionette.execute_script(\"\"\"\n", " let rv = [];\n", " for (let node of arguments[0].childNodes) {\n", " if (node.nodeName == 'span') {\n", " rv.push(node.innerHTML);\n", " }\n", " }\n", " return rv;\n", " \"\"\", script_args=[descnode])\n", "\n", " @property\n", " def visible_results(self):\n", " \"\"\"Supplies the list of visible autocomplete result nodes.\n", "\n", " :returns: The list of visible results.\n", " \"\"\"\n", " return self.marionette.execute_script(\"\"\"\n", " let rv = [];\n", " let node = arguments[0];\n", " for (let i = 0; i < node.itemCount; ++i) {\n", " let item = node.getItemAtIndex(i);\n", " if 
(!item.hasAttribute(\"collapsed\")) {\n", " rv.push(item);\n", " }\n", " }\n", " return rv;\n", " \"\"\", script_args=[self.results])\n", "\n", " @property\n", " def is_open(self):\n", " \"\"\"Returns whether this popup is currently open.\n", "\n", " :returns: True when the popup is open, otherwise false.\n", " \"\"\"\n", " return self.element.get_attribute('state') == 'open'\n", "\n", " @property\n", " def is_complete(self):\n", " \"\"\"Returns when this popup is open and autocomplete results are complete.\n", "\n", " :returns: True, when autocomplete results have been populated.\n", " \"\"\"\n", " return self.marionette.execute_script(\"\"\"\n", " Components.utils.import(\"resource://gre/modules/Services.jsm\");\n", "\n", " let win = Services.focus.activeWindow;\n", " if (win) {\n", " return win.gURLBar.controller.searchStatus >=\n", " Components.interfaces.nsIAutoCompleteController.STATUS_COMPLETE_NO_MATCH;\n", " }\n", "\n", " return null;\n", " \"\"\")\n", "\n", " @property\n", " def results(self):\n", " \"\"\"\n", " :returns: The autocomplete result container node.\n", " \"\"\"\n", " return self.element.find_element(By.ANON_ATTRIBUTE,\n", " {'anonid': 'richlistbox'})\n", "\n", " @property\n", " def selected_index(self):\n", " \"\"\"Provides the index of the selected item in the autocomplete list.\n", "\n", " :returns: The index.\n", " \"\"\"\n", " return self.results.get_attribute('selectedIndex')\n", "\n", "\n", "class IdentityPopup(UIBaseLib):\n", " \"\"\"Wraps DOM elements and methods for interacting with the identity popup.\"\"\"\n", "\n", " def __init__(self, *args, **kwargs):\n", " UIBaseLib.__init__(self, *args, **kwargs)\n", "\n", " self._view = None\n", "\n", " @property\n", " def host(self):\n", " \"\"\"The DOM element which represents the identity-popup content host.\n", "\n", " :returns: Reference to the identity-popup content host.\n", " \"\"\"\n", " return self.marionette.find_element(By.ID, 'identity-popup-content-host')\n", "\n", " @property\n", " def is_open(self):\n", " \"\"\"Returns whether this popup is currently open.\n", "\n", " :returns: True when the popup is open, otherwise false.\n", " \"\"\"\n", " return self.element.get_attribute('state') == 'open'\n", "\n", " def close(self, force=False):\n", " \"\"\"Closes the identity popup by hitting the escape key.\n", "\n", " :param force: Optional, If `True` force close the popup.\n", " Defaults to `False`\n", " \"\"\"\n", " if not self.is_open:\n", " return\n", "\n", " if force:\n", " self.marionette.execute_script(\"\"\"\n", " arguments[0].hidePopup();\n", " \"\"\", script_args=[self.element])\n", " else:\n", " self.element.send_keys(keys.Keys.ESCAPE)\n", "\n", " Wait(self.marionette).until(lambda _: not self.is_open)\n", "\n", " @property\n", " def view(self):\n", " \"\"\"Provides utility members for accessing and manipulating the\n", " identity popup's multi view.\n", "\n", " See the :class:`IdentityPopupMultiView` reference.\n", " \"\"\"\n", " if not self._view:\n", " view = self.marionette.find_element(By.ID, 'identity-popup-multiView')\n", " self._view = IdentityPopupMultiView(lambda: self.marionette, self.window, view)\n", "\n", " return self._view\n", "\n", "\n", "class IdentityPopupMultiView(UIBaseLib):\n", "\n", " def _create_view_for_id(self, view_id):\n", " \"\"\"Creates an instance of :class:`IdentityPopupView` for the specified view id.\n", "\n", " :param view_id: The ID of the view to create an instance of.\n", "\n", " :returns: :class:`IdentityPopupView` instance\n", " \"\"\"\n", " mapping = 
{'identity-popup-mainView': IdentityPopupMainView,\n", " 'identity-popup-securityView': IdentityPopupSecurityView,\n", " }\n", "\n", " view = self.marionette.find_element(By.ID, view_id)\n", " return mapping.get(view_id, IdentityPopupView)(lambda: self.marionette, self.window, view)\n", "\n", " @property\n", " def main(self):\n", " \"\"\"The DOM element which represents the main view.\n", "\n", " :returns: Reference to the main view.\n", " \"\"\"\n", " return self._create_view_for_id('identity-popup-mainView')\n", "\n", " @property\n", " def security(self):\n", " \"\"\"The DOM element which represents the security view.\n", "\n", " :returns: Reference to the security view.\n", " \"\"\"\n", " return self._create_view_for_id('identity-popup-securityView')\n", "\n", "\n", "class IdentityPopupView(UIBaseLib):\n", "\n", " @property\n", " def selected(self):\n", " \"\"\"Checks if the view is selected.\n", "\n", " :return: `True` if the view is selected.\n", " \"\"\"\n", " return self.element.get_attribute('current') == 'true'\n", "\n", "\n", "class IdentityPopupMainView(IdentityPopupView):\n", "\n", " @property\n", " def selected(self):\n", " \"\"\"Checks if the view is selected.\n", "\n", " :return: `True` if the view is selected.\n", " \"\"\"\n", " return self.marionette.execute_script(\"\"\"\n", " return arguments[0].panelMultiView.getAttribute('viewtype') == 'main';\n", " \"\"\", script_args=[self.element])\n", "\n", " @property\n", " def expander(self):\n", " \"\"\"The DOM element which represents the expander button for the security content.\n", "\n", " :returns: Reference to the identity popup expander button.\n", " \"\"\"\n", " return self.element.find_element(By.CLASS_NAME, 'identity-popup-expander')\n", "\n", " @property\n", " def insecure_connection_label(self):\n", " \"\"\"The DOM element which represents the identity popup insecure connection label.\n", "\n", " :returns: Reference to the identity-popup insecure connection label.\n", " \"\"\"\n", " return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-not-secure')\n", "\n", " @property\n", " def internal_connection_label(self):\n", " \"\"\"The DOM element which represents the identity popup internal connection label.\n", "\n", " :returns: Reference to the identity-popup internal connection label.\n", " \"\"\"\n", " return self.element.find_element(By.CSS_SELECTOR, 'description[when-connection=chrome]')\n", "\n", " @property\n", " def permissions(self):\n", " \"\"\"The DOM element which represents the identity-popup permissions content.\n", "\n", " :returns: Reference to the identity-popup permissions.\n", " \"\"\"\n", " return self.element.find_element(By.ID, 'identity-popup-permissions-content')\n", "\n", " @property\n", " def secure_connection_label(self):\n", " \"\"\"The DOM element which represents the identity popup secure connection label.\n", "\n", " :returns: Reference to the identity-popup secure connection label.\n", " \"\"\"\n", " return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-secure')\n", "\n", "\n", "class IdentityPopupSecurityView(IdentityPopupView):\n", "\n", " @property\n", " def disable_mixed_content_blocking_button(self):\n", " \"\"\"The DOM element which represents the disable mixed content blocking button.\n", "\n", " :returns: Reference to the disable mixed content blocking button.\n", " \"\"\"\n", " return self.element.find_element(By.CSS_SELECTOR,\n", " 'button[when-mixedcontent=active-blocked]')\n", "\n", " @property\n", " def 
enable_mixed_content_blocking_button(self):\n", " \"\"\"The DOM element which represents the enable mixed content blocking button.\n", "\n", " :returns: Reference to the enable mixed content blocking button.\n", " \"\"\"\n", " return self.element.find_element(By.CSS_SELECTOR,\n", " 'button[when-mixedcontent=active-loaded]')\n", "\n", " @property\n", " def insecure_connection_label(self):\n", " \"\"\"The DOM element which represents the identity popup insecure connection label.\n", "\n", " :returns: Reference to the identity-popup insecure connection label.\n", " \"\"\"\n", " return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-not-secure')\n", "\n", " @property\n", " def more_info_button(self):\n", " \"\"\"The DOM element which represents the identity-popup more info button.\n", "\n", " :returns: Reference to the identity-popup more info button.\n", " \"\"\"\n", " label = self.window.get_entity('identity.moreInfoLinkText2')\n", "\n", " return self.element.find_element(By.CSS_SELECTOR, u'button[label=\"{}\"]'.format(label))\n", "\n", " @property\n", " def owner(self):\n", " \"\"\"The DOM element which represents the identity-popup content owner.\n", "\n", " :returns: Reference to the identity-popup content owner.\n", " \"\"\"\n", " return self.element.find_element(By.ID, 'identity-popup-content-owner')\n", "\n", " @property\n", " def owner_location(self):\n", " \"\"\"The DOM element which represents the identity-popup content supplemental.\n", "\n", " :returns: Reference to the identity-popup content supplemental.\n", " \"\"\"\n", " return self.element.find_element(By.ID, 'identity-popup-content-supplemental')\n", "\n", " @property\n", " def secure_connection_label(self):\n", " \"\"\"The DOM element which represents the identity popup secure connection label.\n", "\n", " :returns: Reference to the identity-popup secure connection label.\n", " \"\"\"\n", " return self.element.find_element(By.CLASS_NAME, 'identity-popup-connection-secure')\n", "\n", " @property\n", " def verifier(self):\n", " \"\"\"The DOM element which represents the identity-popup content verifier.\n", "\n", " :returns: Reference to the identity-popup content verifier.\n", " \"\"\"\n", " return self.element.find_element(By.ID, 'identity-popup-content-verifier')\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0.011627906976744186, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0.012048192771084338 ]
630
0.000857
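The row that closes here carries the same fields visible above: the raw Python file (org_text), its line-by-line split (texts), one score per line (scores), and the two summary values num_lines (630) and avg_score (0.000857). As a minimal sketch, and assuming the summary fields are derived the obvious way from the values shown (avg_score matching the arithmetic mean of the per-line scores, num_lines matching the length of texts), the derived fields could be recomputed like this; the function name and the exact splitting convention are assumptions for illustration, not a documented spec of this dump.

# Hedged sketch: field names mirror the columns shown in the rows above; how they are
# derived is inferred from the visible values, not stated anywhere in the dump.
def derive_row_fields(org_text, scores):
    """Recompute the per-row summary fields from the raw file and its per-line scores."""
    # Split the raw source into lines, keeping trailing newlines, which appears to match
    # how the texts column is stored (each entry ends in "\n").
    texts = org_text.splitlines(keepends=True)
    num_lines = len(texts)
    # scores holds one float per line; avg_score looks like their arithmetic mean.
    assert len(scores) == num_lines
    avg_score = sum(scores) / num_lines if num_lines else 0.0
    return {"texts": texts, "num_lines": num_lines, "avg_score": avg_score}

If the per-line scores are quality signals, a consumer of this dump could use avg_score to filter rows; that reading is an assumption, since the dump itself does not say what the scores measure.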
"""Online Non-Negative Matrix Factorization. Implementation of the efficient incremental algorithm of Renbo Zhao, Vincent Y. F. Tan et al. `[PDF] <https://arxiv.org/abs/1604.02634>`_. This NMF implementation updates in a streaming fashion and works best with sparse corpora. - W is a word-topic matrix - h is a topic-document matrix - v is an input corpus batch, word-document matrix - A, B - matrices that accumulate information from every consecutive chunk. A = h.dot(ht), B = v.dot(ht). The idea of the algorithm is as follows: .. code-block:: text Initialize W, A and B matrices Input the corpus Split the corpus into batches for v in batches: infer h: do coordinate gradient descent step to find h that minimizes (v - Wh) l2 norm bound h so that it is non-negative update A and B: A = h.dot(ht) B = v.dot(ht) update W: do gradient descent step to find W that minimizes 0.5*trace(WtWA) - trace(WtB) l2 norm Examples -------- Train an NMF model using a Gensim corpus .. sourcecode:: pycon >>> from gensim.models import Nmf >>> from gensim.test.utils import common_texts >>> from gensim.corpora.dictionary import Dictionary >>> >>> # Create a corpus from a list of texts >>> common_dictionary = Dictionary(common_texts) >>> common_corpus = [common_dictionary.doc2bow(text) for text in common_texts] >>> >>> # Train the model on the corpus. >>> nmf = Nmf(common_corpus, num_topics=10) Save a model to disk, or reload a pre-trained model .. sourcecode:: pycon >>> from gensim.test.utils import datapath >>> >>> # Save model to disk. >>> temp_file = datapath("model") >>> nmf.save(temp_file) >>> >>> # Load a potentially pretrained model from disk. >>> nmf = Nmf.load(temp_file) Infer vectors for new documents .. sourcecode:: pycon >>> # Create a new corpus, made of previously unseen documents. >>> other_texts = [ ... ['computer', 'time', 'graph'], ... ['survey', 'response', 'eps'], ... ['human', 'system', 'computer'] ... ] >>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts] >>> >>> unseen_doc = other_corpus[0] >>> vector = Nmf[unseen_doc] # get topic probability distribution for a document Update the model by incrementally training on the new corpus .. sourcecode:: pycon >>> nmf.update(other_corpus) >>> vector = nmf[unseen_doc] A lot of parameters can be tuned to optimize training for your specific case .. sourcecode:: pycon >>> nmf = Nmf(common_corpus, num_topics=50, kappa=0.1, eval_every=5) # decrease training step size The NMF should be used whenever one needs extremely fast and memory optimized topic model. """ import collections.abc import logging import numpy as np import scipy.sparse from scipy.stats import halfnorm from gensim import interfaces from gensim import matutils from gensim import utils from gensim.interfaces import TransformedCorpus from gensim.models import basemodel, CoherenceModel from gensim.models.nmf_pgd import solve_h logger = logging.getLogger(__name__) def version_tuple(version, prefix=2): return tuple(map(int, version.split(".")[:prefix])) OLD_SCIPY = version_tuple(scipy.__version__) <= (0, 18) class Nmf(interfaces.TransformationABC, basemodel.BaseTopicModel): """Online Non-Negative Matrix Factorization. 
`Renbo Zhao et al :"Online Nonnegative Matrix Factorization with Outliers" <https://arxiv.org/abs/1604.02634>`_ """ def __init__( self, corpus=None, num_topics=100, id2word=None, chunksize=2000, passes=1, kappa=1.0, minimum_probability=0.01, w_max_iter=200, w_stop_condition=1e-4, h_max_iter=50, h_stop_condition=1e-3, eval_every=10, normalize=True, random_state=None, ): r""" Parameters ---------- corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents), optional Training corpus. Can be either iterable of documents, which are lists of `(word_id, word_count)`, or a sparse csc matrix of BOWs for each document. If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`). num_topics : int, optional Number of topics to extract. id2word: {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`} Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for debugging and topic printing. chunksize: int, optional Number of documents to be used in each training chunk. passes: int, optional Number of full passes over the training corpus. Leave at default `passes=1` if your input is an iterator. kappa : float, optional Gradient descent step size. Larger value makes the model train faster, but could lead to non-convergence if set too large. minimum_probability: If `normalize` is True, topics with smaller probabilities are filtered out. If `normalize` is False, topics with smaller factors are filtered out. If set to None, a value of 1e-8 is used to prevent 0s. w_max_iter: int, optional Maximum number of iterations to train W per each batch. w_stop_condition: float, optional If error difference gets less than that, training of ``W`` stops for the current batch. h_max_iter: int, optional Maximum number of iterations to train h per each batch. h_stop_condition: float If error difference gets less than that, training of ``h`` stops for the current batch. eval_every: int, optional Number of batches after which l2 norm of (v - Wh) is computed. Decreases performance if set too low. normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. random_state: {np.random.RandomState, int}, optional Seed for random generator. Needed for reproducibility. """ self.num_topics = num_topics self.id2word = id2word self.chunksize = chunksize self.passes = passes self._kappa = kappa self.minimum_probability = minimum_probability self._w_max_iter = w_max_iter self._w_stop_condition = w_stop_condition self._h_max_iter = h_max_iter self._h_stop_condition = h_stop_condition self.eval_every = eval_every self.normalize = normalize self.random_state = utils.get_random_state(random_state) self.v_max = None if self.id2word is None: self.id2word = utils.dict_from_corpus(corpus) self.num_tokens = len(self.id2word) self.A = None self.B = None self._W = None self.w_std = None self._w_error = np.inf self._h = None if corpus is not None: self.update(corpus) def get_topics(self, normalize=None): """Get the term-topic matrix learned during inference. Parameters ---------- normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. Returns ------- numpy.ndarray The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`). 
""" dense_topics = self._W.T if normalize is None: normalize = self.normalize if normalize: return dense_topics / dense_topics.sum(axis=1).reshape(-1, 1) return dense_topics def __getitem__(self, bow, eps=None): return self.get_document_topics(bow, eps) def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True, normalize=None): """Get the topics sorted by sparsity. Parameters ---------- num_topics : int, optional Number of topics to be returned. Unlike LSA, there is no natural ordering between the topics in NMF. The returned topics subset of all topics is therefore arbitrary and may change between two NMF training runs. num_words : int, optional Number of words to be presented for each topic. These will be the most relevant words (assigned the highest probability for each topic). log : bool, optional Whether the result is also logged, besides being returned. formatted : bool, optional Whether the topic representations should be formatted as strings. If False, they are returned as 2 tuples of (word, probability). normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. Returns ------- list of {str, tuple of (str, float)} a list of topics, each represented either as a string (when `formatted` == True) or word-probability pairs. """ if normalize is None: normalize = self.normalize # Compute fraction of zero elements in each column sparsity = np.zeros(self._W.shape[1]) for row in self._W: sparsity += (row == 0) sparsity /= self._W.shape[0] if num_topics < 0 or num_topics >= self.num_topics: num_topics = self.num_topics chosen_topics = range(num_topics) else: num_topics = min(num_topics, self.num_topics) sorted_topics = list(matutils.argsort(sparsity)) chosen_topics = ( sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:] ) shown = [] topics = self.get_topics(normalize=normalize) for i in chosen_topics: topic = topics[i] bestn = matutils.argsort(topic, num_words, reverse=True).ravel() topic = [(self.id2word[id], topic[id]) for id in bestn] if formatted: topic = " + ".join(['%.3f*"%s"' % (v, k) for k, v in topic]) shown.append((i, topic)) if log: logger.info("topic #%i (%.3f): %s", i, sparsity[i], topic) return shown def show_topic(self, topicid, topn=10, normalize=None): """Get the representation for a single topic. Words here are the actual strings, in constrast to :meth:`~gensim.models.nmf.Nmf.get_topic_terms` that represents words by their vocabulary ID. Parameters ---------- topicid : int The ID of the topic to be returned topn : int, optional Number of the most significant words that are associated with the topic. normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. Returns ------- list of (str, float) Word - probability pairs for the most relevant words generated by the topic. """ if normalize is None: normalize = self.normalize return [ (self.id2word[id], value) for id, value in self.get_topic_terms(topicid, topn, normalize=normalize) ] def get_topic_terms(self, topicid, topn=10, normalize=None): """Get the representation for a single topic. Words the integer IDs, in constrast to :meth:`~gensim.models.nmf.Nmf.show_topic` that represents words by the actual strings. Parameters ---------- topicid : int The ID of the topic to be returned topn : int, optional Number of the most significant words that are associated with the topic. normalize: bool or None, optional Whether to normalize the result. 
Allows for estimation of perplexity, coherence, e.t.c. Returns ------- list of (int, float) Word ID - probability pairs for the most relevant words generated by the topic. """ topic = self._W[:, topicid] if normalize is None: normalize = self.normalize if normalize: topic /= topic.sum() bestn = matutils.argsort(topic, topn, reverse=True) return [(idx, topic[idx]) for idx in bestn] def top_topics(self, corpus, texts=None, dictionary=None, window_size=None, coherence='u_mass', topn=20, processes=-1): """Get the topics sorted by coherence. Parameters ---------- corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents) Training corpus. Can be either iterable of documents, which are lists of `(word_id, word_count)`, or a sparse csc matrix of BOWs for each document. If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`). texts : list of list of str, optional Tokenized texts, needed for coherence models that use sliding window based (i.e. coherence=`c_something`) probability estimator . dictionary : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}, optional Dictionary mapping of id word to create corpus. If `model.id2word` is present, this is not needed. If both are provided, passed `dictionary` will be used. window_size : int, optional Is the size of the window to be used for coherence measures using boolean sliding window as their probability estimator. For 'u_mass' this doesn't matter. If None - the default window sizes are used which are: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10. coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional Coherence measure to be used. Fastest method - 'u_mass', 'c_uci' also known as `c_pmi`. For 'u_mass' corpus should be provided, if texts is provided, it will be converted to corpus using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' `texts` should be provided (`corpus` isn't needed) topn : int, optional Integer corresponding to the number of top words to be extracted from each topic. processes : int, optional Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as num_cpus - 1. Returns ------- list of (list of (int, str), float) Each element in the list is a pair of a topic representation and its coherence score. Topic representations are distributions of words, represented as a list of pairs of word IDs and their probabilities. """ cm = CoherenceModel( model=self, corpus=corpus, texts=texts, dictionary=dictionary, window_size=window_size, coherence=coherence, topn=topn, processes=processes ) coherence_scores = cm.get_coherence_per_topic() str_topics = [] for topic in self.get_topics(): # topic = array of vocab_size floats, one per term bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token str_topics.append(beststr) # list of topn (float membership, token) tuples scored_topics = zip(str_topics, coherence_scores) return sorted(scored_topics, key=lambda tup: tup[1], reverse=True) def get_term_topics(self, word_id, minimum_probability=None, normalize=None): """Get the most relevant topics to the given word. Parameters ---------- word_id : int The word for which the topic distribution will be computed. minimum_probability : float, optional If `normalize` is True, topics with smaller probabilities are filtered out. If `normalize` is False, topics with smaller factors are filtered out. 
If set to None, a value of 1e-8 is used to prevent 0s. normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. Returns ------- list of (int, float) The relevant topics represented as pairs of their ID and their assigned probability, sorted by relevance to the given word. """ if minimum_probability is None: minimum_probability = self.minimum_probability minimum_probability = max(minimum_probability, 1e-8) # if user enters word instead of id in vocab, change to get id if isinstance(word_id, str): word_id = self.id2word.doc2bow([word_id])[0][0] values = [] word_topics = self._W[word_id] if normalize is None: normalize = self.normalize if normalize and word_topics.sum() > 0: word_topics /= word_topics.sum() for topic_id in range(0, self.num_topics): word_coef = word_topics[topic_id] if word_coef >= minimum_probability: values.append((topic_id, word_coef)) return values def get_document_topics(self, bow, minimum_probability=None, normalize=None): """Get the topic distribution for the given document. Parameters ---------- bow : list of (int, float) The document in BOW format. minimum_probability : float If `normalize` is True, topics with smaller probabilities are filtered out. If `normalize` is False, topics with smaller factors are filtered out. If set to None, a value of 1e-8 is used to prevent 0s. normalize: bool or None, optional Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c. Returns ------- list of (int, float) Topic distribution for the whole document. Each element in the list is a pair of a topic's id, and the probability that was assigned to it. """ if minimum_probability is None: minimum_probability = self.minimum_probability minimum_probability = max(minimum_probability, 1e-8) # if the input vector is a corpus, return a transformed corpus is_corpus, corpus = utils.is_corpus(bow) if is_corpus: kwargs = dict(minimum_probability=minimum_probability) return self._apply(corpus, **kwargs) v = matutils.corpus2csc([bow], self.num_tokens) h = self._solveproj(v, self._W, v_max=np.inf) if normalize is None: normalize = self.normalize if normalize: the_sum = h.sum() if the_sum: h /= the_sum return [ (idx, proba) for idx, proba in enumerate(h[:, 0]) if not minimum_probability or proba > minimum_probability ] def _setup(self, v): """Infer info from the first batch and initialize the matrices. Parameters ---------- v : `csc_matrix` with the shape (n_tokens, chunksize) Batch of bows. """ self.w_std = np.sqrt(v.mean() / (self.num_tokens * self.num_topics)) self._W = np.abs( self.w_std * halfnorm.rvs( size=(self.num_tokens, self.num_topics), random_state=self.random_state ) ) self.A = np.zeros((self.num_topics, self.num_topics)) self.B = np.zeros((self.num_tokens, self.num_topics)) def l2_norm(self, v): Wt = self._W.T l2 = 0 for doc, doc_topics in zip(v.T, self._h.T): l2 += np.sum(np.square((doc - doc_topics.dot(Wt)))) return np.sqrt(l2) def update(self, corpus, chunksize=None, passes=None, eval_every=None): """Train the model with new documents. Parameters ---------- corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents) Training corpus. Can be either iterable of documents, which are lists of `(word_id, word_count)`, or a sparse csc matrix of BOWs for each document. If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`). chunksize: int, optional Number of documents to be used in each training chunk. 
passes: int, optional Number of full passes over the training corpus. Leave at default `passes=1` if your input is an iterator. eval_every: int, optional Number of batches after which l2 norm of (v - Wh) is computed. Decreases performance if set too low. """ # use parameters given in constructor, unless user explicitly overrode them if passes is None: passes = self.passes if eval_every is None: eval_every = self.eval_every lencorpus = np.inf if isinstance(corpus, scipy.sparse.csc.csc_matrix): lencorpus = corpus.shape[1] else: try: lencorpus = len(corpus) except TypeError: logger.info("input corpus stream has no len()") if chunksize is None: chunksize = min(lencorpus, self.chunksize) evalafter = min(lencorpus, (eval_every or 0) * chunksize) if lencorpus == 0: logger.warning("Nmf.update() called with an empty corpus") return if isinstance(corpus, collections.abc.Iterator) and self.passes > 1: raise ValueError("Corpus is an iterator, only `passes=1` is valid.") logger.info( "running NMF training, %s topics, %i passes over the supplied corpus of %s documents, evaluating L2 " "norm every %i documents", self.num_topics, passes, "unknown number of" if lencorpus is None else lencorpus, evalafter, ) chunk_overall_idx = 1 for pass_ in range(passes): if isinstance(corpus, scipy.sparse.csc.csc_matrix): grouper = ( # Older scipy (0.19 etc) throw an error when slicing beyond the actual sparse array dimensions, so # we clip manually with min() here. corpus[:, col_idx:min(corpus.shape[1], col_idx + self.chunksize)] for col_idx in range(0, corpus.shape[1], self.chunksize) ) else: grouper = utils.grouper(corpus, self.chunksize) for chunk_idx, chunk in enumerate(grouper): if isinstance(corpus, scipy.sparse.csc.csc_matrix): v = chunk[:, self.random_state.permutation(chunk.shape[1])] chunk_len = v.shape[1] else: self.random_state.shuffle(chunk) v = matutils.corpus2csc( chunk, num_terms=self.num_tokens, ) chunk_len = len(chunk) if np.isinf(lencorpus): logger.info( "PROGRESS: pass %i, at document #%i", pass_, chunk_idx * chunksize + chunk_len ) else: logger.info( "PROGRESS: pass %i, at document #%i/%i", pass_, chunk_idx * chunksize + chunk_len, lencorpus ) if self._W is None: # If `self._W` is not set (i.e. the first batch being handled), compute the initial matrix using the # batch mean. self._setup(v) self._h = self._solveproj(v, self._W, h=self._h, v_max=self.v_max) h = self._h if eval_every and (((chunk_idx + 1) * chunksize >= lencorpus) or (chunk_idx + 1) % eval_every == 0): logger.info("L2 norm: %s", self.l2_norm(v)) self.print_topics(5) self.A *= chunk_overall_idx - 1 self.A += h.dot(h.T) self.A /= chunk_overall_idx self.B *= chunk_overall_idx - 1 self.B += v.dot(h.T) self.B /= chunk_overall_idx self._solve_w() chunk_overall_idx += 1 logger.info("W error: %s", self._w_error) def _solve_w(self): """Update W.""" def error(WA): """An optimized version of 0.5 * trace(WtWA) - trace(WtB).""" return 0.5 * np.einsum('ij,ij', WA, self._W) - np.einsum('ij,ij', self._W, self.B) eta = self._kappa / np.linalg.norm(self.A) for iter_number in range(self._w_max_iter): logger.debug("w_error: %s", self._w_error) WA = self._W.dot(self.A) self._W -= eta * (WA - self.B) self._transform() error_ = error(WA) if ( self._w_error < np.inf and np.abs((error_ - self._w_error) / self._w_error) < self._w_stop_condition ): self._w_error = error_ break self._w_error = error_ def _apply(self, corpus, chunksize=None, **kwargs): """Apply the transformation to a whole corpus and get the result as another corpus. 
Parameters ---------- corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents) Training corpus. Can be either iterable of documents, which are lists of `(word_id, word_count)`, or a sparse csc matrix of BOWs for each document. If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`). chunksize : int, optional If provided, a more effective processing will performed. Returns ------- :class:`~gensim.interfaces.TransformedCorpus` Transformed corpus. """ return TransformedCorpus(self, corpus, chunksize, **kwargs) def _transform(self): """Apply boundaries on W.""" np.clip(self._W, 0, self.v_max, out=self._W) sumsq = np.sqrt(np.einsum('ij,ij->j', self._W, self._W)) np.maximum(sumsq, 1, out=sumsq) self._W /= sumsq @staticmethod def _dense_dot_csc(dense, csc): if OLD_SCIPY: return (csc.T.dot(dense.T)).T else: return scipy.sparse.csc_matrix.dot(dense, csc) def _solveproj(self, v, W, h=None, v_max=None): """Update residuals and representation (h) matrices. Parameters ---------- v : scipy.sparse.csc_matrix Subset of training corpus. W : ndarray Dictionary matrix. h : ndarray Representation matrix. v_max : float Maximum possible value in matrices. """ m, n = W.shape if v_max is not None: self.v_max = v_max elif self.v_max is None: self.v_max = v.max() batch_size = v.shape[1] hshape = (n, batch_size) if h is None or h.shape != hshape: h = np.zeros(hshape) Wt = W.T WtW = Wt.dot(W) h_error = None for iter_number in range(self._h_max_iter): logger.debug("h_error: %s", h_error) Wtv = self._dense_dot_csc(Wt, v) permutation = self.random_state.permutation(self.num_topics).astype(np.int32) error_ = solve_h(h, Wtv, WtW, permutation, self._kappa) error_ /= m if h_error and np.abs(h_error - error_) < self._h_stop_condition: break h_error = error_ return h
[ "\"\"\"Online Non-Negative Matrix Factorization.\n", "Implementation of the efficient incremental algorithm of Renbo Zhao, Vincent Y. F. Tan et al.\n", "`[PDF] <https://arxiv.org/abs/1604.02634>`_.\n", "\n", "This NMF implementation updates in a streaming fashion and works best with sparse corpora.\n", "\n", "- W is a word-topic matrix\n", "- h is a topic-document matrix\n", "- v is an input corpus batch, word-document matrix\n", "- A, B - matrices that accumulate information from every consecutive chunk. A = h.dot(ht), B = v.dot(ht).\n", "\n", "The idea of the algorithm is as follows:\n", "\n", ".. code-block:: text\n", "\n", " Initialize W, A and B matrices\n", "\n", " Input the corpus\n", " Split the corpus into batches\n", "\n", " for v in batches:\n", " infer h:\n", " do coordinate gradient descent step to find h that minimizes (v - Wh) l2 norm\n", "\n", " bound h so that it is non-negative\n", "\n", " update A and B:\n", " A = h.dot(ht)\n", " B = v.dot(ht)\n", "\n", " update W:\n", " do gradient descent step to find W that minimizes 0.5*trace(WtWA) - trace(WtB) l2 norm\n", "\n", "Examples\n", "--------\n", "\n", "Train an NMF model using a Gensim corpus\n", "\n", ".. sourcecode:: pycon\n", "\n", " >>> from gensim.models import Nmf\n", " >>> from gensim.test.utils import common_texts\n", " >>> from gensim.corpora.dictionary import Dictionary\n", " >>>\n", " >>> # Create a corpus from a list of texts\n", " >>> common_dictionary = Dictionary(common_texts)\n", " >>> common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]\n", " >>>\n", " >>> # Train the model on the corpus.\n", " >>> nmf = Nmf(common_corpus, num_topics=10)\n", "\n", "Save a model to disk, or reload a pre-trained model\n", "\n", ".. sourcecode:: pycon\n", "\n", " >>> from gensim.test.utils import datapath\n", " >>>\n", " >>> # Save model to disk.\n", " >>> temp_file = datapath(\"model\")\n", " >>> nmf.save(temp_file)\n", " >>>\n", " >>> # Load a potentially pretrained model from disk.\n", " >>> nmf = Nmf.load(temp_file)\n", "\n", "Infer vectors for new documents\n", "\n", ".. sourcecode:: pycon\n", "\n", " >>> # Create a new corpus, made of previously unseen documents.\n", " >>> other_texts = [\n", " ... ['computer', 'time', 'graph'],\n", " ... ['survey', 'response', 'eps'],\n", " ... ['human', 'system', 'computer']\n", " ... ]\n", " >>> other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]\n", " >>>\n", " >>> unseen_doc = other_corpus[0]\n", " >>> vector = Nmf[unseen_doc] # get topic probability distribution for a document\n", "\n", "Update the model by incrementally training on the new corpus\n", "\n", ".. sourcecode:: pycon\n", "\n", " >>> nmf.update(other_corpus)\n", " >>> vector = nmf[unseen_doc]\n", "\n", "A lot of parameters can be tuned to optimize training for your specific case\n", "\n", ".. 
sourcecode:: pycon\n", "\n", " >>> nmf = Nmf(common_corpus, num_topics=50, kappa=0.1, eval_every=5) # decrease training step size\n", "\n", "The NMF should be used whenever one needs extremely fast and memory optimized topic model.\n", "\n", "\"\"\"\n", "\n", "\n", "import collections.abc\n", "import logging\n", "\n", "import numpy as np\n", "import scipy.sparse\n", "from scipy.stats import halfnorm\n", "\n", "from gensim import interfaces\n", "from gensim import matutils\n", "from gensim import utils\n", "from gensim.interfaces import TransformedCorpus\n", "from gensim.models import basemodel, CoherenceModel\n", "from gensim.models.nmf_pgd import solve_h\n", "\n", "logger = logging.getLogger(__name__)\n", "\n", "\n", "def version_tuple(version, prefix=2):\n", " return tuple(map(int, version.split(\".\")[:prefix]))\n", "\n", "\n", "OLD_SCIPY = version_tuple(scipy.__version__) <= (0, 18)\n", "\n", "\n", "class Nmf(interfaces.TransformationABC, basemodel.BaseTopicModel):\n", " \"\"\"Online Non-Negative Matrix Factorization.\n", "\n", " `Renbo Zhao et al :\"Online Nonnegative Matrix Factorization with Outliers\" <https://arxiv.org/abs/1604.02634>`_\n", "\n", " \"\"\"\n", "\n", " def __init__(\n", " self,\n", " corpus=None,\n", " num_topics=100,\n", " id2word=None,\n", " chunksize=2000,\n", " passes=1,\n", " kappa=1.0,\n", " minimum_probability=0.01,\n", " w_max_iter=200,\n", " w_stop_condition=1e-4,\n", " h_max_iter=50,\n", " h_stop_condition=1e-3,\n", " eval_every=10,\n", " normalize=True,\n", " random_state=None,\n", " ):\n", " r\"\"\"\n", "\n", " Parameters\n", " ----------\n", " corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents), optional\n", " Training corpus.\n", " Can be either iterable of documents, which are lists of `(word_id, word_count)`,\n", " or a sparse csc matrix of BOWs for each document.\n", " If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).\n", " num_topics : int, optional\n", " Number of topics to extract.\n", " id2word: {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}\n", " Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for\n", " debugging and topic printing.\n", " chunksize: int, optional\n", " Number of documents to be used in each training chunk.\n", " passes: int, optional\n", " Number of full passes over the training corpus.\n", " Leave at default `passes=1` if your input is an iterator.\n", " kappa : float, optional\n", " Gradient descent step size.\n", " Larger value makes the model train faster, but could lead to non-convergence if set too large.\n", " minimum_probability:\n", " If `normalize` is True, topics with smaller probabilities are filtered out.\n", " If `normalize` is False, topics with smaller factors are filtered out.\n", " If set to None, a value of 1e-8 is used to prevent 0s.\n", " w_max_iter: int, optional\n", " Maximum number of iterations to train W per each batch.\n", " w_stop_condition: float, optional\n", " If error difference gets less than that, training of ``W`` stops for the current batch.\n", " h_max_iter: int, optional\n", " Maximum number of iterations to train h per each batch.\n", " h_stop_condition: float\n", " If error difference gets less than that, training of ``h`` stops for the current batch.\n", " eval_every: int, optional\n", " Number of batches after which l2 norm of (v - Wh) is computed. 
Decreases performance if set too low.\n", " normalize: bool or None, optional\n", " Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c.\n", " random_state: {np.random.RandomState, int}, optional\n", " Seed for random generator. Needed for reproducibility.\n", "\n", " \"\"\"\n", " self.num_topics = num_topics\n", " self.id2word = id2word\n", " self.chunksize = chunksize\n", " self.passes = passes\n", " self._kappa = kappa\n", " self.minimum_probability = minimum_probability\n", " self._w_max_iter = w_max_iter\n", " self._w_stop_condition = w_stop_condition\n", " self._h_max_iter = h_max_iter\n", " self._h_stop_condition = h_stop_condition\n", " self.eval_every = eval_every\n", " self.normalize = normalize\n", " self.random_state = utils.get_random_state(random_state)\n", "\n", " self.v_max = None\n", "\n", " if self.id2word is None:\n", " self.id2word = utils.dict_from_corpus(corpus)\n", "\n", " self.num_tokens = len(self.id2word)\n", "\n", " self.A = None\n", " self.B = None\n", "\n", " self._W = None\n", " self.w_std = None\n", " self._w_error = np.inf\n", "\n", " self._h = None\n", "\n", " if corpus is not None:\n", " self.update(corpus)\n", "\n", " def get_topics(self, normalize=None):\n", " \"\"\"Get the term-topic matrix learned during inference.\n", "\n", " Parameters\n", " ----------\n", " normalize: bool or None, optional\n", " Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " numpy.ndarray\n", " The probability for each word in each topic, shape (`num_topics`, `vocabulary_size`).\n", "\n", " \"\"\"\n", " dense_topics = self._W.T\n", " if normalize is None:\n", " normalize = self.normalize\n", " if normalize:\n", " return dense_topics / dense_topics.sum(axis=1).reshape(-1, 1)\n", "\n", " return dense_topics\n", "\n", " def __getitem__(self, bow, eps=None):\n", " return self.get_document_topics(bow, eps)\n", "\n", " def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True, normalize=None):\n", " \"\"\"Get the topics sorted by sparsity.\n", "\n", " Parameters\n", " ----------\n", " num_topics : int, optional\n", " Number of topics to be returned. Unlike LSA, there is no natural ordering between the topics in NMF.\n", " The returned topics subset of all topics is therefore arbitrary and may change between two NMF\n", " training runs.\n", " num_words : int, optional\n", " Number of words to be presented for each topic. These will be the most relevant words (assigned the highest\n", " probability for each topic).\n", " log : bool, optional\n", " Whether the result is also logged, besides being returned.\n", " formatted : bool, optional\n", " Whether the topic representations should be formatted as strings. If False, they are returned as\n", " 2 tuples of (word, probability).\n", " normalize: bool or None, optional\n", " Whether to normalize the result. 
Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " list of {str, tuple of (str, float)}\n", " a list of topics, each represented either as a string (when `formatted` == True) or word-probability\n", " pairs.\n", "\n", " \"\"\"\n", " if normalize is None:\n", " normalize = self.normalize\n", "\n", " # Compute fraction of zero elements in each column\n", "\n", " sparsity = np.zeros(self._W.shape[1])\n", "\n", " for row in self._W:\n", " sparsity += (row == 0)\n", "\n", " sparsity /= self._W.shape[0]\n", "\n", " if num_topics < 0 or num_topics >= self.num_topics:\n", " num_topics = self.num_topics\n", " chosen_topics = range(num_topics)\n", " else:\n", " num_topics = min(num_topics, self.num_topics)\n", "\n", " sorted_topics = list(matutils.argsort(sparsity))\n", " chosen_topics = (\n", " sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:]\n", " )\n", "\n", " shown = []\n", "\n", " topics = self.get_topics(normalize=normalize)\n", "\n", " for i in chosen_topics:\n", " topic = topics[i]\n", " bestn = matutils.argsort(topic, num_words, reverse=True).ravel()\n", " topic = [(self.id2word[id], topic[id]) for id in bestn]\n", " if formatted:\n", " topic = \" + \".join(['%.3f*\"%s\"' % (v, k) for k, v in topic])\n", "\n", " shown.append((i, topic))\n", " if log:\n", " logger.info(\"topic #%i (%.3f): %s\", i, sparsity[i], topic)\n", "\n", " return shown\n", "\n", " def show_topic(self, topicid, topn=10, normalize=None):\n", " \"\"\"Get the representation for a single topic. Words here are the actual strings, in constrast to\n", " :meth:`~gensim.models.nmf.Nmf.get_topic_terms` that represents words by their vocabulary ID.\n", "\n", " Parameters\n", " ----------\n", " topicid : int\n", " The ID of the topic to be returned\n", " topn : int, optional\n", " Number of the most significant words that are associated with the topic.\n", " normalize: bool or None, optional\n", " Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " list of (str, float)\n", " Word - probability pairs for the most relevant words generated by the topic.\n", "\n", " \"\"\"\n", " if normalize is None:\n", " normalize = self.normalize\n", "\n", " return [\n", " (self.id2word[id], value)\n", " for id, value in self.get_topic_terms(topicid, topn,\n", " normalize=normalize)\n", " ]\n", "\n", " def get_topic_terms(self, topicid, topn=10, normalize=None):\n", " \"\"\"Get the representation for a single topic. Words the integer IDs, in constrast to\n", " :meth:`~gensim.models.nmf.Nmf.show_topic` that represents words by the actual strings.\n", "\n", " Parameters\n", " ----------\n", " topicid : int\n", " The ID of the topic to be returned\n", " topn : int, optional\n", " Number of the most significant words that are associated with the topic.\n", " normalize: bool or None, optional\n", " Whether to normalize the result. 
Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " list of (int, float)\n", " Word ID - probability pairs for the most relevant words generated by the topic.\n", "\n", " \"\"\"\n", " topic = self._W[:, topicid]\n", "\n", " if normalize is None:\n", " normalize = self.normalize\n", " if normalize:\n", " topic /= topic.sum()\n", "\n", " bestn = matutils.argsort(topic, topn, reverse=True)\n", " return [(idx, topic[idx]) for idx in bestn]\n", "\n", " def top_topics(self, corpus, texts=None, dictionary=None, window_size=None,\n", " coherence='u_mass', topn=20, processes=-1):\n", " \"\"\"Get the topics sorted by coherence.\n", "\n", " Parameters\n", " ----------\n", " corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)\n", " Training corpus.\n", " Can be either iterable of documents, which are lists of `(word_id, word_count)`,\n", " or a sparse csc matrix of BOWs for each document.\n", " If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).\n", " texts : list of list of str, optional\n", " Tokenized texts, needed for coherence models that use sliding window based (i.e. coherence=`c_something`)\n", " probability estimator .\n", " dictionary : {dict of (int, str), :class:`gensim.corpora.dictionary.Dictionary`}, optional\n", " Dictionary mapping of id word to create corpus.\n", " If `model.id2word` is present, this is not needed. If both are provided, passed `dictionary` will be used.\n", " window_size : int, optional\n", " Is the size of the window to be used for coherence measures using boolean sliding window as their\n", " probability estimator. For 'u_mass' this doesn't matter.\n", " If None - the default window sizes are used which are: 'c_v' - 110, 'c_uci' - 10, 'c_npmi' - 10.\n", " coherence : {'u_mass', 'c_v', 'c_uci', 'c_npmi'}, optional\n", " Coherence measure to be used.\n", " Fastest method - 'u_mass', 'c_uci' also known as `c_pmi`.\n", " For 'u_mass' corpus should be provided, if texts is provided, it will be converted to corpus\n", " using the dictionary. For 'c_v', 'c_uci' and 'c_npmi' `texts` should be provided (`corpus` isn't needed)\n", " topn : int, optional\n", " Integer corresponding to the number of top words to be extracted from each topic.\n", " processes : int, optional\n", " Number of processes to use for probability estimation phase, any value less than 1 will be interpreted as\n", " num_cpus - 1.\n", "\n", " Returns\n", " -------\n", " list of (list of (int, str), float)\n", " Each element in the list is a pair of a topic representation and its coherence score. 
Topic representations\n", " are distributions of words, represented as a list of pairs of word IDs and their probabilities.\n", "\n", " \"\"\"\n", " cm = CoherenceModel(\n", " model=self, corpus=corpus, texts=texts, dictionary=dictionary,\n", " window_size=window_size, coherence=coherence, topn=topn,\n", " processes=processes\n", " )\n", " coherence_scores = cm.get_coherence_per_topic()\n", "\n", " str_topics = []\n", " for topic in self.get_topics(): # topic = array of vocab_size floats, one per term\n", " bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic\n", " beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token\n", " str_topics.append(beststr) # list of topn (float membership, token) tuples\n", "\n", " scored_topics = zip(str_topics, coherence_scores)\n", " return sorted(scored_topics, key=lambda tup: tup[1], reverse=True)\n", "\n", " def get_term_topics(self, word_id, minimum_probability=None, normalize=None):\n", " \"\"\"Get the most relevant topics to the given word.\n", "\n", " Parameters\n", " ----------\n", " word_id : int\n", " The word for which the topic distribution will be computed.\n", " minimum_probability : float, optional\n", " If `normalize` is True, topics with smaller probabilities are filtered out.\n", " If `normalize` is False, topics with smaller factors are filtered out.\n", " If set to None, a value of 1e-8 is used to prevent 0s.\n", " normalize: bool or None, optional\n", " Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " list of (int, float)\n", " The relevant topics represented as pairs of their ID and their assigned probability, sorted\n", " by relevance to the given word.\n", "\n", " \"\"\"\n", " if minimum_probability is None:\n", " minimum_probability = self.minimum_probability\n", " minimum_probability = max(minimum_probability, 1e-8)\n", "\n", " # if user enters word instead of id in vocab, change to get id\n", " if isinstance(word_id, str):\n", " word_id = self.id2word.doc2bow([word_id])[0][0]\n", "\n", " values = []\n", "\n", " word_topics = self._W[word_id]\n", "\n", " if normalize is None:\n", " normalize = self.normalize\n", " if normalize and word_topics.sum() > 0:\n", " word_topics /= word_topics.sum()\n", "\n", " for topic_id in range(0, self.num_topics):\n", " word_coef = word_topics[topic_id]\n", "\n", " if word_coef >= minimum_probability:\n", " values.append((topic_id, word_coef))\n", "\n", " return values\n", "\n", " def get_document_topics(self, bow, minimum_probability=None,\n", " normalize=None):\n", " \"\"\"Get the topic distribution for the given document.\n", "\n", " Parameters\n", " ----------\n", " bow : list of (int, float)\n", " The document in BOW format.\n", " minimum_probability : float\n", " If `normalize` is True, topics with smaller probabilities are filtered out.\n", " If `normalize` is False, topics with smaller factors are filtered out.\n", " If set to None, a value of 1e-8 is used to prevent 0s.\n", " normalize: bool or None, optional\n", " Whether to normalize the result. Allows for estimation of perplexity, coherence, e.t.c.\n", "\n", " Returns\n", " -------\n", " list of (int, float)\n", " Topic distribution for the whole document. 
Each element in the list is a pair of a topic's id, and\n", " the probability that was assigned to it.\n", "\n", " \"\"\"\n", " if minimum_probability is None:\n", " minimum_probability = self.minimum_probability\n", " minimum_probability = max(minimum_probability, 1e-8)\n", "\n", " # if the input vector is a corpus, return a transformed corpus\n", " is_corpus, corpus = utils.is_corpus(bow)\n", "\n", " if is_corpus:\n", " kwargs = dict(minimum_probability=minimum_probability)\n", " return self._apply(corpus, **kwargs)\n", "\n", " v = matutils.corpus2csc([bow], self.num_tokens)\n", " h = self._solveproj(v, self._W, v_max=np.inf)\n", "\n", " if normalize is None:\n", " normalize = self.normalize\n", " if normalize:\n", " the_sum = h.sum()\n", " if the_sum:\n", " h /= the_sum\n", "\n", " return [\n", " (idx, proba)\n", " for idx, proba in enumerate(h[:, 0])\n", " if not minimum_probability or proba > minimum_probability\n", " ]\n", "\n", " def _setup(self, v):\n", " \"\"\"Infer info from the first batch and initialize the matrices.\n", "\n", " Parameters\n", " ----------\n", " v : `csc_matrix` with the shape (n_tokens, chunksize)\n", " Batch of bows.\n", "\n", " \"\"\"\n", " self.w_std = np.sqrt(v.mean() / (self.num_tokens * self.num_topics))\n", "\n", " self._W = np.abs(\n", " self.w_std\n", " * halfnorm.rvs(\n", " size=(self.num_tokens, self.num_topics), random_state=self.random_state\n", " )\n", " )\n", "\n", " self.A = np.zeros((self.num_topics, self.num_topics))\n", " self.B = np.zeros((self.num_tokens, self.num_topics))\n", "\n", " def l2_norm(self, v):\n", " Wt = self._W.T\n", "\n", " l2 = 0\n", "\n", " for doc, doc_topics in zip(v.T, self._h.T):\n", " l2 += np.sum(np.square((doc - doc_topics.dot(Wt))))\n", "\n", " return np.sqrt(l2)\n", "\n", " def update(self, corpus, chunksize=None, passes=None, eval_every=None):\n", " \"\"\"Train the model with new documents.\n", "\n", " Parameters\n", " ----------\n", " corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)\n", " Training corpus.\n", " Can be either iterable of documents, which are lists of `(word_id, word_count)`,\n", " or a sparse csc matrix of BOWs for each document.\n", " If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).\n", " chunksize: int, optional\n", " Number of documents to be used in each training chunk.\n", " passes: int, optional\n", " Number of full passes over the training corpus.\n", " Leave at default `passes=1` if your input is an iterator.\n", " eval_every: int, optional\n", " Number of batches after which l2 norm of (v - Wh) is computed. 
Decreases performance if set too low.\n", "\n", " \"\"\"\n", " # use parameters given in constructor, unless user explicitly overrode them\n", " if passes is None:\n", " passes = self.passes\n", " if eval_every is None:\n", " eval_every = self.eval_every\n", "\n", " lencorpus = np.inf\n", "\n", " if isinstance(corpus, scipy.sparse.csc.csc_matrix):\n", " lencorpus = corpus.shape[1]\n", " else:\n", " try:\n", " lencorpus = len(corpus)\n", " except TypeError:\n", " logger.info(\"input corpus stream has no len()\")\n", "\n", " if chunksize is None:\n", " chunksize = min(lencorpus, self.chunksize)\n", "\n", " evalafter = min(lencorpus, (eval_every or 0) * chunksize)\n", "\n", " if lencorpus == 0:\n", " logger.warning(\"Nmf.update() called with an empty corpus\")\n", " return\n", "\n", " if isinstance(corpus, collections.abc.Iterator) and self.passes > 1:\n", " raise ValueError(\"Corpus is an iterator, only `passes=1` is valid.\")\n", "\n", " logger.info(\n", " \"running NMF training, %s topics, %i passes over the supplied corpus of %s documents, evaluating L2 \"\n", " \"norm every %i documents\",\n", " self.num_topics, passes, \"unknown number of\" if lencorpus is None else lencorpus, evalafter,\n", " )\n", "\n", " chunk_overall_idx = 1\n", "\n", " for pass_ in range(passes):\n", " if isinstance(corpus, scipy.sparse.csc.csc_matrix):\n", " grouper = (\n", " # Older scipy (0.19 etc) throw an error when slicing beyond the actual sparse array dimensions, so\n", " # we clip manually with min() here.\n", "\n", " corpus[:, col_idx:min(corpus.shape[1], col_idx + self.chunksize)]\n", " for col_idx\n", " in range(0, corpus.shape[1], self.chunksize)\n", " )\n", " else:\n", " grouper = utils.grouper(corpus, self.chunksize)\n", "\n", " for chunk_idx, chunk in enumerate(grouper):\n", " if isinstance(corpus, scipy.sparse.csc.csc_matrix):\n", " v = chunk[:, self.random_state.permutation(chunk.shape[1])]\n", "\n", " chunk_len = v.shape[1]\n", " else:\n", " self.random_state.shuffle(chunk)\n", "\n", " v = matutils.corpus2csc(\n", " chunk,\n", " num_terms=self.num_tokens,\n", " )\n", "\n", " chunk_len = len(chunk)\n", "\n", " if np.isinf(lencorpus):\n", " logger.info(\n", " \"PROGRESS: pass %i, at document #%i\",\n", " pass_, chunk_idx * chunksize + chunk_len\n", " )\n", " else:\n", " logger.info(\n", " \"PROGRESS: pass %i, at document #%i/%i\",\n", " pass_, chunk_idx * chunksize + chunk_len, lencorpus\n", " )\n", "\n", " if self._W is None:\n", " # If `self._W` is not set (i.e. 
the first batch being handled), compute the initial matrix using the\n", " # batch mean.\n", "\n", " self._setup(v)\n", "\n", " self._h = self._solveproj(v, self._W, h=self._h, v_max=self.v_max)\n", " h = self._h\n", "\n", " if eval_every and (((chunk_idx + 1) * chunksize >= lencorpus) or (chunk_idx + 1) % eval_every == 0):\n", " logger.info(\"L2 norm: %s\", self.l2_norm(v))\n", " self.print_topics(5)\n", "\n", " self.A *= chunk_overall_idx - 1\n", " self.A += h.dot(h.T)\n", " self.A /= chunk_overall_idx\n", "\n", " self.B *= chunk_overall_idx - 1\n", " self.B += v.dot(h.T)\n", " self.B /= chunk_overall_idx\n", "\n", " self._solve_w()\n", "\n", " chunk_overall_idx += 1\n", "\n", " logger.info(\"W error: %s\", self._w_error)\n", "\n", " def _solve_w(self):\n", " \"\"\"Update W.\"\"\"\n", "\n", " def error(WA):\n", " \"\"\"An optimized version of 0.5 * trace(WtWA) - trace(WtB).\"\"\"\n", " return 0.5 * np.einsum('ij,ij', WA, self._W) - np.einsum('ij,ij', self._W, self.B)\n", "\n", " eta = self._kappa / np.linalg.norm(self.A)\n", "\n", " for iter_number in range(self._w_max_iter):\n", " logger.debug(\"w_error: %s\", self._w_error)\n", "\n", " WA = self._W.dot(self.A)\n", "\n", " self._W -= eta * (WA - self.B)\n", " self._transform()\n", "\n", " error_ = error(WA)\n", "\n", " if (\n", " self._w_error < np.inf\n", " and np.abs((error_ - self._w_error) / self._w_error) < self._w_stop_condition\n", " ):\n", " self._w_error = error_\n", " break\n", "\n", " self._w_error = error_\n", "\n", " def _apply(self, corpus, chunksize=None, **kwargs):\n", " \"\"\"Apply the transformation to a whole corpus and get the result as another corpus.\n", "\n", " Parameters\n", " ----------\n", " corpus : iterable of list of (int, float) or `csc_matrix` with the shape (n_tokens, n_documents)\n", " Training corpus.\n", " Can be either iterable of documents, which are lists of `(word_id, word_count)`,\n", " or a sparse csc matrix of BOWs for each document.\n", " If not specified, the model is left uninitialized (presumably, to be trained later with `self.train()`).\n", " chunksize : int, optional\n", " If provided, a more effective processing will performed.\n", "\n", " Returns\n", " -------\n", " :class:`~gensim.interfaces.TransformedCorpus`\n", " Transformed corpus.\n", "\n", " \"\"\"\n", " return TransformedCorpus(self, corpus, chunksize, **kwargs)\n", "\n", " def _transform(self):\n", " \"\"\"Apply boundaries on W.\"\"\"\n", " np.clip(self._W, 0, self.v_max, out=self._W)\n", " sumsq = np.sqrt(np.einsum('ij,ij->j', self._W, self._W))\n", " np.maximum(sumsq, 1, out=sumsq)\n", " self._W /= sumsq\n", "\n", " @staticmethod\n", " def _dense_dot_csc(dense, csc):\n", " if OLD_SCIPY:\n", " return (csc.T.dot(dense.T)).T\n", " else:\n", " return scipy.sparse.csc_matrix.dot(dense, csc)\n", "\n", " def _solveproj(self, v, W, h=None, v_max=None):\n", " \"\"\"Update residuals and representation (h) matrices.\n", "\n", " Parameters\n", " ----------\n", " v : scipy.sparse.csc_matrix\n", " Subset of training corpus.\n", " W : ndarray\n", " Dictionary matrix.\n", " h : ndarray\n", " Representation matrix.\n", " v_max : float\n", " Maximum possible value in matrices.\n", "\n", " \"\"\"\n", " m, n = W.shape\n", " if v_max is not None:\n", " self.v_max = v_max\n", " elif self.v_max is None:\n", " self.v_max = v.max()\n", "\n", " batch_size = v.shape[1]\n", " hshape = (n, batch_size)\n", "\n", " if h is None or h.shape != hshape:\n", " h = np.zeros(hshape)\n", "\n", " Wt = W.T\n", " WtW = Wt.dot(W)\n", "\n", " h_error = None\n", "\n", " 
for iter_number in range(self._h_max_iter):\n", " logger.debug(\"h_error: %s\", h_error)\n", "\n", " Wtv = self._dense_dot_csc(Wt, v)\n", "\n", " permutation = self.random_state.permutation(self.num_topics).astype(np.int32)\n", "\n", " error_ = solve_h(h, Wtv, WtW, permutation, self._kappa)\n", "\n", " error_ /= m\n", "\n", " if h_error and np.abs(h_error - error_) < self._h_stop_condition:\n", " break\n", "\n", " h_error = error_\n", "\n", " return h\n" ]
[ 0, 0.010638297872340425, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0.009433962264150943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009615384615384616, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008620689655172414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008695652173913044, 0, 0.010752688172043012, 0, 0.008547008547008548, 0, 0, 0.011764705882352941, 0.009615384615384616, 0, 0, 0, 0, 0, 0, 0, 0, 0.009345794392523364, 0, 0.011363636363636364, 0.012048192771084338, 0, 0, 0, 0, 0.01, 0, 0, 0, 0.01, 0, 0.008849557522123894, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0.008849557522123894, 0.009345794392523364, 0, 0, 0.008333333333333333, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0.01, 0, 0, 0, 0, 0.008849557522123894, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0.01, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0.01, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0.010752688172043012, 0, 0.008547008547008548, 0, 0.00847457627118644, 0, 0.010101010101010102, 0, 0.008403361344537815, 0, 0.00909090909090909, 0, 0.009174311926605505, 0, 0, 0, 0.009523809523809525, 0.008547008547008548, 0, 0.010638297872340425, 0, 0.00847457627118644, 0, 0, 0, 0, 0, 0.008333333333333333, 0.009259259259259259, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0.010869565217391304, 0.010638297872340425, 0.011363636363636364, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0.012048192771084338, 0, 0, 0.01, 0, 0, 0, 0, 0.009615384615384616, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0.012048192771084338, 0, 0, 0.01, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0.010752688172043012, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.008771929824561403, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0, 0, 0.008403361344537815, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008264462809917356, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0.009523809523809525, 0, 0.010752688172043012, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
783
0.00118
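The record above embeds gensim's description of the online NMF scheme: infer h for each batch by projected gradient steps on ||v - Wh||, accumulate the running matrices A = h.dot(ht) and B = v.dot(ht), then take gradient steps on W against 0.5*trace(WtWA) - trace(WtB). As a rough, self-contained NumPy sketch of that loop only (not the gensim implementation; all toy sizes, the batch generator, and helper names other than W, h, A, B and kappa are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
n_tokens, n_topics, kappa = 30, 5, 1.0

W = np.abs(rng.standard_normal((n_tokens, n_topics)))   # word-topic matrix
A = np.zeros((n_topics, n_topics))                       # running average of h.dot(h.T)
B = np.zeros((n_tokens, n_topics))                       # running average of v.dot(h.T)

def infer_h(v, W, n_iter=50):
    """Projected gradient steps minimizing ||v - W h||^2 with h kept non-negative."""
    h = np.zeros((W.shape[1], v.shape[1]))
    step = kappa / np.linalg.norm(W.T @ W)
    for _ in range(n_iter):
        h -= step * (W.T @ (W @ h - v))                  # gradient of 0.5*||v - W h||^2 w.r.t. h
        np.maximum(h, 0.0, out=h)                        # bound h so that it is non-negative
    return h

batches = [np.abs(rng.standard_normal((n_tokens, 10))) for _ in range(4)]
for t, v in enumerate(batches, start=1):
    h = infer_h(v, W)
    A = (A * (t - 1) + h @ h.T) / t                      # accumulate A across chunks
    B = (B * (t - 1) + v @ h.T) / t                      # accumulate B across chunks
    eta = kappa / np.linalg.norm(A)
    for _ in range(10):                                  # descend on 0.5*tr(WtWA) - tr(WtB); gradient is W A - B
        W -= eta * (W @ A - B)
        np.clip(W, 0.0, None, out=W)                     # keep W non-negative

The running-average updates of A and B mirror the "A *= chunk_idx - 1; A += h.dot(h.T); A /= chunk_idx" pattern visible in the quoted update() method, and eta = kappa / ||A|| matches the step size used in the quoted _solve_w().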
import json import logging from lxml import etree from lxml.html import rewrite_links from xmodule.timeinfo import TimeInfo from xmodule.capa_module import ComplexEncoder from xmodule.editing_module import EditingDescriptor from xmodule.progress import Progress from xmodule.stringify import stringify_children from xmodule.xml_module import XmlDescriptor import self_assessment_module import open_ended_module from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST log = logging.getLogger("mitx.courseware") # Set the default number of max attempts. Should be 1 for production # Set higher for debugging/testing # attempts specified in xml definition overrides this. MAX_ATTEMPTS = 1 #The highest score allowed for the overall xmodule and for each rubric point MAX_SCORE_ALLOWED = 50 #If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress #Metadata overrides this. IS_SCORED = False #If true, then default behavior is to require a file upload or pasted link from a student for this problem. #Metadata overrides this. ACCEPT_FILE_UPLOAD = False #Contains all reasonable bool and case combinations of True TRUE_DICT = ["True", True, "TRUE", "true"] HUMAN_TASK_TYPE = { 'selfassessment': "Self Assessment", 'openended': "edX Assessment", } #Default value that controls whether or not to skip basic spelling checks in the controller #Metadata overrides this SKIP_BASIC_CHECKS = False class CombinedOpenEndedV1Module(): """ This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). It transitions between problems, and support arbitrary ordering. Each combined open ended module contains one or multiple "child" modules. Child modules track their own state, and can transition between states. They also implement get_html and handle_ajax. The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) ajax actions implemented by all children are: 'save_answer' -- Saves the student answer 'save_assessment' -- Saves the student assessment (or external grader assessment) 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) ajax actions implemented by combined open ended module are: 'reset' -- resets the whole combined open ended module and returns to the first child moduleresource_string 'next_problem' -- moves to the next child module 'get_results' -- gets results from a given child module Types of children. Task is synonymous with child module, so each combined open ended module incorporates multiple children (tasks): openendedmodule selfassessmentmodule """ STATE_VERSION = 1 # states INITIAL = 'initial' ASSESSING = 'assessing' INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' #Where the templates live for this problem TEMPLATE_DIR = "combinedopenended" def __init__(self, system, location, definition, descriptor, instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs): """ Definition file should have one or many task blocks, a rubric block, and a prompt block: Sample file: <combinedopenended attempts="10000"> <rubric> Blah blah rubric. </rubric> <prompt> Some prompt. </prompt> <task> <selfassessment> <hintprompt> What hint about this problem would you give to someone? </hintprompt> <submitmessage> Save Succcesful. 
Thanks for participating! </submitmessage> </selfassessment> </task> <task> <openended min_score_to_attempt="1" max_score_to_attempt="1"> <openendedparam> <initial_display>Enter essay here.</initial_display> <answer_display>This is the answer.</answer_display> <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload> </openendedparam> </openended> </task> </combinedopenended> """ self.instance_state = instance_state self.display_name = instance_state.get('display_name', "Open Ended") self.rewrite_content_links = static_data.get('rewrite_content_links', "") #We need to set the location here so the child modules can use it system.set('location', location) self.system = system #Tells the system which xml definition to load self.current_task_number = instance_state.get('current_task_number', 0) #This loads the states of the individual children self.task_states = instance_state.get('task_states', []) #Overall state of the combined open ended module self.state = instance_state.get('state', self.INITIAL) self.student_attempts = instance_state.get('student_attempts', 0) self.weight = instance_state.get('weight', 1) #Allow reset is true if student has failed the criteria to move to the next child task self.ready_to_reset = instance_state.get('ready_to_reset', False) self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS) self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT due_date = self.instance_state.get('due', None) grace_period_string = self.instance_state.get('graceperiod', None) try: self.timeinfo = TimeInfo(due_date, grace_period_string) except Exception: log.error("Error parsing due date information in location {0}".format(location)) raise self.display_due_date = self.timeinfo.display_due_date self.rubric_renderer = CombinedOpenEndedRubric(system, True) rubric_string = stringify_children(definition['rubric']) self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED) #Static data is passed to the child modules to render self.static_data = { 'max_score': self._max_score, 'max_attempts': self.attempts, 'prompt': definition['prompt'], 'rubric': definition['rubric'], 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, 'close_date': self.timeinfo.close_date, 's3_interface': self.system.s3_interface, 'skip_basic_checks': self.skip_basic_checks, } self.task_xml = definition['task_xml'] self.location = location self.setup_next_task() def get_tag_name(self, xml): """ Gets the tag name of a given xml block. Input: XML string Output: The name of the root tag """ tag = etree.fromstring(xml).tag return tag def overwrite_state(self, current_task_state): """ Overwrites an instance state and sets the latest response to the current response. This is used to ensure that the student response is carried over from the first child to the rest. 
Input: Task state json string Output: Task state json string """ last_response_data = self.get_last_response(self.current_task_number - 1) last_response = last_response_data['response'] loaded_task_state = json.loads(current_task_state) if loaded_task_state['child_state'] == self.INITIAL: loaded_task_state['child_state'] = self.ASSESSING loaded_task_state['child_created'] = True loaded_task_state['child_history'].append({'answer': last_response}) current_task_state = json.dumps(loaded_task_state) return current_task_state def child_modules(self): """ Returns the constructors associated with the child modules in a dictionary. This makes writing functions simpler (saves code duplication) Input: None Output: A dictionary of dictionaries containing the descriptor functions and module functions """ child_modules = { 'openended': open_ended_module.OpenEndedModule, 'selfassessment': self_assessment_module.SelfAssessmentModule, } child_descriptors = { 'openended': open_ended_module.OpenEndedDescriptor, 'selfassessment': self_assessment_module.SelfAssessmentDescriptor, } children = { 'modules': child_modules, 'descriptors': child_descriptors, } return children def setup_next_task(self, reset=False): """ Sets up the next task for the module. Creates an instance state if none exists, carries over the answer from the last instance state to the next if needed. Input: A boolean indicating whether or not the reset function is calling. Output: Boolean True (not useful right now) """ current_task_state = None if len(self.task_states) > self.current_task_number: current_task_state = self.task_states[self.current_task_number] self.current_task_xml = self.task_xml[self.current_task_number] if self.current_task_number > 0: self.ready_to_reset = self.check_allow_reset() if self.ready_to_reset: self.current_task_number = self.current_task_number - 1 current_task_type = self.get_tag_name(self.current_task_xml) children = self.child_modules() child_task_module = children['modules'][current_task_type] self.current_task_descriptor = children['descriptors'][current_task_type](self.system) #This is the xml object created from the xml definition of the current task etree_xml = etree.fromstring(self.current_task_xml) #This sends the etree_xml object through the descriptor module of the current task, and #returns the xml parsed by the descriptor self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) if current_task_state is None and self.current_task_number == 0: self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data) self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING elif current_task_state is None and self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) last_response = last_response_data['response'] current_task_state = json.dumps({ 'child_state': self.ASSESSING, 'version': self.STATE_VERSION, 'max_score': self._max_score, 'child_attempts': 0, 'child_created': True, 'child_history': [{'answer': last_response}], }) self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, instance_state=current_task_state) self.task_states.append(self.current_task.get_instance_state()) self.state = self.ASSESSING else: if self.current_task_number > 0 and not reset: current_task_state = self.overwrite_state(current_task_state) 
self.current_task = child_task_module(self.system, self.location, self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, instance_state=current_task_state) return True def check_allow_reset(self): """ Checks to see if the student has passed the criteria to move to the next module. If not, sets allow_reset to true and halts the student progress through the tasks. Input: None Output: the allow_reset attribute of the current module. """ if not self.ready_to_reset: if self.current_task_number > 0: last_response_data = self.get_last_response(self.current_task_number - 1) current_response_data = self.get_current_attributes(self.current_task_number) if (current_response_data['min_score_to_attempt'] > last_response_data['score'] or current_response_data['max_score_to_attempt'] < last_response_data['score']): self.state = self.DONE self.ready_to_reset = True return self.ready_to_reset def get_context(self): """ Generates a context dictionary that is used to render html. Input: None Output: A dictionary that can be rendered into the combined open ended template. """ task_html = self.get_html_base() #set context variables and render template context = { 'items': [{'content': task_html}], 'ajax_url': self.system.ajax_url, 'allow_reset': self.ready_to_reset, 'state': self.state, 'task_count': len(self.task_xml), 'task_number': self.current_task_number + 1, 'status': self.get_status(False), 'display_name': self.display_name, 'accept_file_upload': self.accept_file_upload, 'location': self.location, 'legend_list': LEGEND_LIST, } return context def get_html(self): """ Gets HTML for rendering. Input: None Output: rendered html """ context = self.get_context() html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_nonsystem(self): """ Gets HTML for rendering via AJAX. Does not use system, because system contains some additional html, which is not appropriate for returning via ajax calls. Input: None Output: HTML rendered directly via Mako """ context = self.get_context() html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context) return html def get_html_base(self): """ Gets the HTML associated with the current child task Input: None Output: Child task HTML """ self.update_task_states() html = self.current_task.get_html(self.system) return_html = html try: #Without try except block, get this error: # File "/home/vik/mitx_all/mitx/common/lib/xmodule/xmodule/x_module.py", line 263, in rewrite_content_links # if link.startswith(XASSET_SRCREF_PREFIX): # Placing try except so that if the error is fixed, this code will start working again. return_html = rewrite_links(html, self.rewrite_content_links) except Exception: pass return return_html def get_current_attributes(self, task_number): """ Gets the min and max score to attempt attributes of the specified task. Input: The number of the task. Output: The minimum and maximum scores needed to move on to the specified task. """ task_xml = self.task_xml[task_number] etree_xml = etree.fromstring(task_xml) min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt} def get_last_response(self, task_number): """ Returns data associated with the specified task number, such as the last response, score, etc. Input: The number of the task. 
Output: A dictionary that contains information about the specified task. """ last_response = "" task_state = self.task_states[task_number] task_xml = self.task_xml[task_number] task_type = self.get_tag_name(task_xml) children = self.child_modules() task_descriptor = children['descriptors'][task_type](self.system) etree_xml = etree.fromstring(task_xml) min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, self.static_data, instance_state=task_state) last_response = task.latest_answer() last_score = task.latest_score() all_scores = task.all_scores() last_post_assessment = task.latest_post_assessment(self.system) last_post_feedback = "" feedback_dicts = [{}] grader_ids = [0] submission_ids = [0] if task_type == "openended": last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False) if isinstance(last_post_assessment, list): eval_list = [] for i in xrange(0, len(last_post_assessment)): eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i])) last_post_evaluation = "".join(eval_list) else: last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment) last_post_assessment = last_post_evaluation try: rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', ""), self.system) except Exception: log.debug("Could not parse rubric data from child history. " "Likely we have not yet initialized a previous step, so this is perfectly fine.") rubric_data = {} rubric_scores = rubric_data.get('rubric_scores') grader_types = rubric_data.get('grader_types') feedback_items = rubric_data.get('feedback_items') feedback_dicts = rubric_data.get('feedback_dicts') grader_ids = rubric_data.get('grader_ids') submission_ids = rubric_data.get('submission_ids') elif task_type == "selfassessment": rubric_scores = last_post_assessment grader_types = ['SA'] feedback_items = [''] last_post_assessment = "" last_correctness = task.is_last_response_correct() max_score = task.max_score() state = task.child_state if task_type in HUMAN_TASK_TYPE: human_task_name = HUMAN_TASK_TYPE[task_type] else: human_task_name = task_type if state in task.HUMAN_NAMES: human_state = task.HUMAN_NAMES[state] else: human_state = state if grader_types is not None and len(grader_types) > 0: grader_type = grader_types[0] else: grader_type = "IN" if grader_type in HUMAN_GRADER_TYPE: human_grader_name = HUMAN_GRADER_TYPE[grader_type] else: human_grader_name = grader_type last_response_dict = { 'response': last_response, 'score': last_score, 'all_scores': all_scores, 'post_assessment': last_post_assessment, 'type': task_type, 'max_score': max_score, 'state': state, 'human_state': human_state, 'human_task': human_task_name, 'correct': last_correctness, 'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt, 'rubric_scores': rubric_scores, 'grader_types': grader_types, 'feedback_items': feedback_items, 'grader_type': grader_type, 'human_grader_type': human_grader_name, 'feedback_dicts': feedback_dicts, 'grader_ids': grader_ids, 'submission_ids': submission_ids, } return last_response_dict def update_task_states(self): """ Updates the task state of the combined open ended module with the task state of 
the current child module. Input: None Output: boolean indicating whether or not the task state changed. """ changed = False if not self.ready_to_reset: self.task_states[self.current_task_number] = self.current_task.get_instance_state() current_task_state = json.loads(self.task_states[self.current_task_number]) if current_task_state['child_state'] == self.DONE: self.current_task_number += 1 if self.current_task_number >= (len(self.task_xml)): self.state = self.DONE self.current_task_number = len(self.task_xml) - 1 else: self.state = self.INITIAL changed = True self.setup_next_task() return changed def update_task_states_ajax(self, return_html): """ Runs the update task states function for ajax calls. Currently the same as update_task_states Input: The html returned by the handle_ajax function of the child Output: New html that should be rendered """ changed = self.update_task_states() if changed: #return_html=self.get_html() pass return return_html def get_rubric(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. """ all_responses = [] loop_up_to_task = self.current_task_number + 1 for i in xrange(0, loop_up_to_task): all_responses.append(self.get_last_response(i)) rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][ 0] in HUMAN_GRADER_TYPE.keys()] grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][ 0] in HUMAN_GRADER_TYPE.keys()] feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][ 0] in HUMAN_GRADER_TYPE.keys()] rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']), rubric_scores, grader_types, feedback_items) response_dict = all_responses[-1] context = { 'results': rubric_html, 'task_name': 'Scored Rubric', 'class_name': 'combined-rubric-container' } html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_legend(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. """ context = { 'legend_list': LEGEND_LIST, } html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_results(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. 
""" self.update_task_states() loop_up_to_task = self.current_task_number + 1 all_responses = [] for i in xrange(0, loop_up_to_task): all_responses.append(self.get_last_response(i)) context_list = [] for ri in all_responses: for i in xrange(0, len(ri['rubric_scores'])): feedback = ri['feedback_dicts'][i].get('feedback', '') rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']), ri['rubric_scores'][i]) if rubric_data['success']: rubric_html = rubric_data['html'] else: rubric_html = '' context = { 'rubric_html': rubric_html, 'grader_type': ri['grader_type'], 'feedback': feedback, 'grader_id': ri['grader_ids'][i], 'submission_id': ri['submission_ids'][i], } context_list.append(context) feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), { 'context_list': context_list, 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, 'human_grader_types': HUMAN_GRADER_TYPE, 'rows': 50, 'cols': 50, }) context = { 'results': feedback_table, 'task_name': "Feedback", 'class_name': "result-container", } html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context) return {'html': html, 'success': True} def get_status_ajax(self, get): """ Gets the results of a given grader via ajax. Input: AJAX get dictionary Output: Dictionary to be rendered via ajax that contains the result html. """ html = self.get_status(True) return {'html': html, 'success': True} def handle_ajax(self, dispatch, get): """ This is called by courseware.module_render, to handle an AJAX call. "get" is request.POST. Returns a json dictionary: { 'progress_changed' : True/False, 'progress': 'none'/'in_progress'/'done', <other request-specific values here > } """ handlers = { 'next_problem': self.next_problem, 'reset': self.reset, 'get_results': self.get_results, 'get_combined_rubric': self.get_rubric, 'get_status': self.get_status_ajax, 'get_legend': self.get_legend, } if dispatch not in handlers: return_html = self.current_task.handle_ajax(dispatch, get, self.system) return self.update_task_states_ajax(return_html) d = handlers[dispatch](get) return json.dumps(d, cls=ComplexEncoder) def next_problem(self, get): """ Called via ajax to advance to the next problem. Input: AJAX get request. Output: Dictionary to be rendered """ self.update_task_states() return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset} def reset(self, get): """ If resetting is allowed, reset the state of the combined open ended module. Input: AJAX get dictionary Output: AJAX dictionary to tbe rendered """ if self.state != self.DONE: if not self.ready_to_reset: return self.out_of_sync_error(get) if self.student_attempts > self.attempts: return { 'success': False, #This is a student_facing_error 'error': ( 'You have attempted this question {0} times. ' 'You are only allowed to attempt it {1} times.' ).format(self.student_attempts, self.attempts) } self.state = self.INITIAL self.ready_to_reset = False for i in xrange(0, len(self.task_xml)): self.current_task_number = i self.setup_next_task(reset=True) self.current_task.reset(self.system) self.task_states[self.current_task_number] = self.current_task.get_instance_state() self.current_task_number = 0 self.ready_to_reset = False self.setup_next_task() return {'success': True, 'html': self.get_html_nonsystem()} def get_instance_state(self): """ Returns the current instance state. The module can be recreated from the instance state. 
Input: None Output: A dictionary containing the instance state. """ state = { 'version': self.STATE_VERSION, 'current_task_number': self.current_task_number, 'state': self.state, 'task_states': self.task_states, 'student_attempts': self.student_attempts, 'ready_to_reset': self.ready_to_reset, } return json.dumps(state) def get_status(self, render_via_ajax): """ Gets the status panel to be displayed at the top right. Input: None Output: The status html to be rendered """ status = [] for i in xrange(0, self.current_task_number + 1): task_data = self.get_last_response(i) task_data.update({'task_number': i + 1}) status.append(task_data) context = { 'status_list': status, 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT, 'legend_list': LEGEND_LIST, 'render_via_ajax': render_via_ajax, } status_html = self.system.render_template("{0}/combined_open_ended_status.html".format(self.TEMPLATE_DIR), context) return status_html def check_if_done_and_scored(self): """ Checks if the object is currently in a finished state (either student didn't meet criteria to move to next step, in which case they are in the allow_reset state, or they are done with the question entirely, in which case they will be in the self.DONE state), and if it is scored or not. @return: Boolean corresponding to the above. """ return (self.state == self.DONE or self.ready_to_reset) and self.is_scored def get_score(self): """ Score the student received on the problem, or None if there is no score. Returns: dictionary {'score': integer, from 0 to get_max_score(), 'total': get_max_score()} """ max_score = None score = None if self.is_scored and self.weight is not None: #Finds the maximum score of all student attempts and keeps it. score_mat = [] for i in xrange(0, len(self.task_states)): #For each task, extract all student scores on that task (each attempt for each task) last_response = self.get_last_response(i) max_score = last_response.get('max_score', None) score = last_response.get('all_scores', None) if score is not None: #Convert none scores and weight scores properly for z in xrange(0, len(score)): if score[z] is None: score[z] = 0 score[z] *= float(self.weight) score_mat.append(score) if len(score_mat) > 0: #Currently, assume that the final step is the correct one, and that those are the final scores. #This will change in the future, which is why the machinery above exists to extract all scores on all steps #TODO: better final score handling. scores = score_mat[-1] score = max(scores) else: score = 0 if max_score is not None: #Weight the max score if it is not None max_score *= float(self.weight) else: #Without a max_score, we cannot have a score! score = None score_dict = { 'score': score, 'total': max_score, } return score_dict def max_score(self): ''' Maximum score. Two notes: * This is generic; in abstract, a problem could be 3/5 points on one randomization, and 5/7 on another ''' max_score = None if self.check_if_done_and_scored(): last_response = self.get_last_response(self.current_task_number) max_score = last_response['max_score'] return max_score def get_progress(self): ''' Return a progress.Progress object that represents how far the student has gone in this module. Must be implemented to get correct progress tracking behavior in nesting modules like sequence and vertical. If this module has no notion of progress, return None. 
''' progress_object = Progress(self.current_task_number, len(self.task_xml)) return progress_object def out_of_sync_error(self, get, msg=''): """ return dict out-of-sync error message, and also log. """ #This is a dev_facing_error log.warning("Combined module state out sync. state: %r, get: %r. %s", self.state, get, msg) #This is a student_facing_error return {'success': False, 'error': 'The problem state got out-of-sync. Please try reloading the page.'} class CombinedOpenEndedV1Descriptor(): """ Module for adding combined open ended questions """ mako_template = "widgets/html-edit.html" module_class = CombinedOpenEndedV1Module filename_extension = "xml" stores_state = True has_score = True template_dir_name = "combinedopenended" def __init__(self, system): self.system = system @classmethod def definition_from_xml(cls, xml_object, system): """ Pull out the individual tasks, the rubric, and the prompt, and parse Returns: { 'rubric': 'some-html', 'prompt': 'some-html', 'task_xml': dictionary of xml strings, } """ expected_children = ['task', 'rubric', 'prompt'] for child in expected_children: if len(xml_object.xpath(child)) == 0: #This is a staff_facing_error raise ValueError( "Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}".format( child, xml_object)) def parse_task(k): """Assumes that xml_object has child k""" return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] def parse(k): """Assumes that xml_object has child k""" return xml_object.xpath(k)[0] return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')} def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' elt = etree.Element('combinedopenended') def add_child(k): child_str = '<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k]) child_node = etree.fromstring(child_str) elt.append(child_node) for child in ['task']: add_child(child) return elt
[ "import json\n", "import logging\n", "from lxml import etree\n", "from lxml.html import rewrite_links\n", "from xmodule.timeinfo import TimeInfo\n", "from xmodule.capa_module import ComplexEncoder\n", "from xmodule.editing_module import EditingDescriptor\n", "from xmodule.progress import Progress\n", "from xmodule.stringify import stringify_children\n", "from xmodule.xml_module import XmlDescriptor\n", "import self_assessment_module\n", "import open_ended_module\n", "from .combined_open_ended_rubric import CombinedOpenEndedRubric, GRADER_TYPE_IMAGE_DICT, HUMAN_GRADER_TYPE, LEGEND_LIST\n", "\n", "log = logging.getLogger(\"mitx.courseware\")\n", "\n", "# Set the default number of max attempts. Should be 1 for production\n", "# Set higher for debugging/testing\n", "# attempts specified in xml definition overrides this.\n", "MAX_ATTEMPTS = 1\n", "\n", "#The highest score allowed for the overall xmodule and for each rubric point\n", "MAX_SCORE_ALLOWED = 50\n", "\n", "#If true, default behavior is to score module as a practice problem. Otherwise, no grade at all is shown in progress\n", "#Metadata overrides this.\n", "IS_SCORED = False\n", "\n", "#If true, then default behavior is to require a file upload or pasted link from a student for this problem.\n", "#Metadata overrides this.\n", "ACCEPT_FILE_UPLOAD = False\n", "\n", "#Contains all reasonable bool and case combinations of True\n", "TRUE_DICT = [\"True\", True, \"TRUE\", \"true\"]\n", "\n", "HUMAN_TASK_TYPE = {\n", " 'selfassessment': \"Self Assessment\",\n", " 'openended': \"edX Assessment\",\n", "}\n", "\n", "#Default value that controls whether or not to skip basic spelling checks in the controller\n", "#Metadata overrides this\n", "SKIP_BASIC_CHECKS = False\n", "\n", "\n", "class CombinedOpenEndedV1Module():\n", " \"\"\"\n", " This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).\n", " It transitions between problems, and support arbitrary ordering.\n", " Each combined open ended module contains one or multiple \"child\" modules.\n", " Child modules track their own state, and can transition between states. They also implement get_html and\n", " handle_ajax.\n", " The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess\n", " ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem)\n", " ajax actions implemented by all children are:\n", " 'save_answer' -- Saves the student answer\n", " 'save_assessment' -- Saves the student assessment (or external grader assessment)\n", " 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)\n", " ajax actions implemented by combined open ended module are:\n", " 'reset' -- resets the whole combined open ended module and returns to the first child moduleresource_string\n", " 'next_problem' -- moves to the next child module\n", " 'get_results' -- gets results from a given child module\n", "\n", " Types of children. 
Task is synonymous with child module, so each combined open ended module\n", " incorporates multiple children (tasks):\n", " openendedmodule\n", " selfassessmentmodule\n", " \"\"\"\n", " STATE_VERSION = 1\n", "\n", " # states\n", " INITIAL = 'initial'\n", " ASSESSING = 'assessing'\n", " INTERMEDIATE_DONE = 'intermediate_done'\n", " DONE = 'done'\n", "\n", " #Where the templates live for this problem\n", " TEMPLATE_DIR = \"combinedopenended\"\n", "\n", " def __init__(self, system, location, definition, descriptor,\n", " instance_state=None, shared_state=None, metadata=None, static_data=None, **kwargs):\n", "\n", " \"\"\"\n", " Definition file should have one or many task blocks, a rubric block, and a prompt block:\n", "\n", " Sample file:\n", " <combinedopenended attempts=\"10000\">\n", " <rubric>\n", " Blah blah rubric.\n", " </rubric>\n", " <prompt>\n", " Some prompt.\n", " </prompt>\n", " <task>\n", " <selfassessment>\n", " <hintprompt>\n", " What hint about this problem would you give to someone?\n", " </hintprompt>\n", " <submitmessage>\n", " Save Succcesful. Thanks for participating!\n", " </submitmessage>\n", " </selfassessment>\n", " </task>\n", " <task>\n", " <openended min_score_to_attempt=\"1\" max_score_to_attempt=\"1\">\n", " <openendedparam>\n", " <initial_display>Enter essay here.</initial_display>\n", " <answer_display>This is the answer.</answer_display>\n", " <grader_payload>{\"grader_settings\" : \"ml_grading.conf\",\n", " \"problem_id\" : \"6.002x/Welcome/OETest\"}</grader_payload>\n", " </openendedparam>\n", " </openended>\n", " </task>\n", " </combinedopenended>\n", "\n", " \"\"\"\n", "\n", " self.instance_state = instance_state\n", " self.display_name = instance_state.get('display_name', \"Open Ended\")\n", " self.rewrite_content_links = static_data.get('rewrite_content_links', \"\")\n", "\n", " #We need to set the location here so the child modules can use it\n", " system.set('location', location)\n", " self.system = system\n", "\n", " #Tells the system which xml definition to load\n", " self.current_task_number = instance_state.get('current_task_number', 0)\n", " #This loads the states of the individual children\n", " self.task_states = instance_state.get('task_states', [])\n", " #Overall state of the combined open ended module\n", " self.state = instance_state.get('state', self.INITIAL)\n", "\n", " self.student_attempts = instance_state.get('student_attempts', 0)\n", " self.weight = instance_state.get('weight', 1)\n", "\n", " #Allow reset is true if student has failed the criteria to move to the next child task\n", " self.ready_to_reset = instance_state.get('ready_to_reset', False)\n", " self.attempts = self.instance_state.get('attempts', MAX_ATTEMPTS)\n", " self.is_scored = self.instance_state.get('is_graded', IS_SCORED) in TRUE_DICT\n", " self.accept_file_upload = self.instance_state.get('accept_file_upload', ACCEPT_FILE_UPLOAD) in TRUE_DICT\n", " self.skip_basic_checks = self.instance_state.get('skip_spelling_checks', SKIP_BASIC_CHECKS) in TRUE_DICT\n", "\n", " due_date = self.instance_state.get('due', None)\n", "\n", " grace_period_string = self.instance_state.get('graceperiod', None)\n", " try:\n", " self.timeinfo = TimeInfo(due_date, grace_period_string)\n", " except Exception:\n", " log.error(\"Error parsing due date information in location {0}\".format(location))\n", " raise\n", " self.display_due_date = self.timeinfo.display_due_date\n", "\n", " self.rubric_renderer = CombinedOpenEndedRubric(system, True)\n", " rubric_string = 
stringify_children(definition['rubric'])\n", " self._max_score = self.rubric_renderer.check_if_rubric_is_parseable(rubric_string, location, MAX_SCORE_ALLOWED)\n", "\n", " #Static data is passed to the child modules to render\n", " self.static_data = {\n", " 'max_score': self._max_score,\n", " 'max_attempts': self.attempts,\n", " 'prompt': definition['prompt'],\n", " 'rubric': definition['rubric'],\n", " 'display_name': self.display_name,\n", " 'accept_file_upload': self.accept_file_upload,\n", " 'close_date': self.timeinfo.close_date,\n", " 's3_interface': self.system.s3_interface,\n", " 'skip_basic_checks': self.skip_basic_checks,\n", " }\n", "\n", " self.task_xml = definition['task_xml']\n", " self.location = location\n", " self.setup_next_task()\n", "\n", " def get_tag_name(self, xml):\n", " \"\"\"\n", " Gets the tag name of a given xml block.\n", " Input: XML string\n", " Output: The name of the root tag\n", " \"\"\"\n", " tag = etree.fromstring(xml).tag\n", " return tag\n", "\n", " def overwrite_state(self, current_task_state):\n", " \"\"\"\n", " Overwrites an instance state and sets the latest response to the current response. This is used\n", " to ensure that the student response is carried over from the first child to the rest.\n", " Input: Task state json string\n", " Output: Task state json string\n", " \"\"\"\n", " last_response_data = self.get_last_response(self.current_task_number - 1)\n", " last_response = last_response_data['response']\n", "\n", " loaded_task_state = json.loads(current_task_state)\n", " if loaded_task_state['child_state'] == self.INITIAL:\n", " loaded_task_state['child_state'] = self.ASSESSING\n", " loaded_task_state['child_created'] = True\n", " loaded_task_state['child_history'].append({'answer': last_response})\n", " current_task_state = json.dumps(loaded_task_state)\n", " return current_task_state\n", "\n", " def child_modules(self):\n", " \"\"\"\n", " Returns the constructors associated with the child modules in a dictionary. This makes writing functions\n", " simpler (saves code duplication)\n", " Input: None\n", " Output: A dictionary of dictionaries containing the descriptor functions and module functions\n", " \"\"\"\n", " child_modules = {\n", " 'openended': open_ended_module.OpenEndedModule,\n", " 'selfassessment': self_assessment_module.SelfAssessmentModule,\n", " }\n", " child_descriptors = {\n", " 'openended': open_ended_module.OpenEndedDescriptor,\n", " 'selfassessment': self_assessment_module.SelfAssessmentDescriptor,\n", " }\n", " children = {\n", " 'modules': child_modules,\n", " 'descriptors': child_descriptors,\n", " }\n", " return children\n", "\n", " def setup_next_task(self, reset=False):\n", " \"\"\"\n", " Sets up the next task for the module. 
Creates an instance state if none exists, carries over the answer\n", " from the last instance state to the next if needed.\n", " Input: A boolean indicating whether or not the reset function is calling.\n", " Output: Boolean True (not useful right now)\n", " \"\"\"\n", " current_task_state = None\n", " if len(self.task_states) > self.current_task_number:\n", " current_task_state = self.task_states[self.current_task_number]\n", "\n", " self.current_task_xml = self.task_xml[self.current_task_number]\n", "\n", " if self.current_task_number > 0:\n", " self.ready_to_reset = self.check_allow_reset()\n", " if self.ready_to_reset:\n", " self.current_task_number = self.current_task_number - 1\n", "\n", " current_task_type = self.get_tag_name(self.current_task_xml)\n", "\n", " children = self.child_modules()\n", " child_task_module = children['modules'][current_task_type]\n", "\n", " self.current_task_descriptor = children['descriptors'][current_task_type](self.system)\n", "\n", " #This is the xml object created from the xml definition of the current task\n", " etree_xml = etree.fromstring(self.current_task_xml)\n", "\n", " #This sends the etree_xml object through the descriptor module of the current task, and\n", " #returns the xml parsed by the descriptor\n", " self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)\n", " if current_task_state is None and self.current_task_number == 0:\n", " self.current_task = child_task_module(self.system, self.location,\n", " self.current_task_parsed_xml, self.current_task_descriptor,\n", " self.static_data)\n", " self.task_states.append(self.current_task.get_instance_state())\n", " self.state = self.ASSESSING\n", " elif current_task_state is None and self.current_task_number > 0:\n", " last_response_data = self.get_last_response(self.current_task_number - 1)\n", " last_response = last_response_data['response']\n", " current_task_state = json.dumps({\n", " 'child_state': self.ASSESSING,\n", " 'version': self.STATE_VERSION,\n", " 'max_score': self._max_score,\n", " 'child_attempts': 0,\n", " 'child_created': True,\n", " 'child_history': [{'answer': last_response}],\n", " })\n", " self.current_task = child_task_module(self.system, self.location,\n", " self.current_task_parsed_xml, self.current_task_descriptor,\n", " self.static_data,\n", " instance_state=current_task_state)\n", " self.task_states.append(self.current_task.get_instance_state())\n", " self.state = self.ASSESSING\n", " else:\n", " if self.current_task_number > 0 and not reset:\n", " current_task_state = self.overwrite_state(current_task_state)\n", " self.current_task = child_task_module(self.system, self.location,\n", " self.current_task_parsed_xml, self.current_task_descriptor,\n", " self.static_data,\n", " instance_state=current_task_state)\n", "\n", " return True\n", "\n", " def check_allow_reset(self):\n", " \"\"\"\n", " Checks to see if the student has passed the criteria to move to the next module. 
If not, sets\n", " allow_reset to true and halts the student progress through the tasks.\n", " Input: None\n", " Output: the allow_reset attribute of the current module.\n", " \"\"\"\n", " if not self.ready_to_reset:\n", " if self.current_task_number > 0:\n", " last_response_data = self.get_last_response(self.current_task_number - 1)\n", " current_response_data = self.get_current_attributes(self.current_task_number)\n", " if (current_response_data['min_score_to_attempt'] > last_response_data['score']\n", " or current_response_data['max_score_to_attempt'] < last_response_data['score']):\n", " self.state = self.DONE\n", " self.ready_to_reset = True\n", "\n", " return self.ready_to_reset\n", "\n", " def get_context(self):\n", " \"\"\"\n", " Generates a context dictionary that is used to render html.\n", " Input: None\n", " Output: A dictionary that can be rendered into the combined open ended template.\n", " \"\"\"\n", " task_html = self.get_html_base()\n", " #set context variables and render template\n", "\n", " context = {\n", " 'items': [{'content': task_html}],\n", " 'ajax_url': self.system.ajax_url,\n", " 'allow_reset': self.ready_to_reset,\n", " 'state': self.state,\n", " 'task_count': len(self.task_xml),\n", " 'task_number': self.current_task_number + 1,\n", " 'status': self.get_status(False),\n", " 'display_name': self.display_name,\n", " 'accept_file_upload': self.accept_file_upload,\n", " 'location': self.location,\n", " 'legend_list': LEGEND_LIST,\n", " }\n", "\n", " return context\n", "\n", " def get_html(self):\n", " \"\"\"\n", " Gets HTML for rendering.\n", " Input: None\n", " Output: rendered html\n", " \"\"\"\n", " context = self.get_context()\n", " html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context)\n", " return html\n", "\n", " def get_html_nonsystem(self):\n", " \"\"\"\n", " Gets HTML for rendering via AJAX. 
Does not use system, because system contains some additional\n", " html, which is not appropriate for returning via ajax calls.\n", " Input: None\n", " Output: HTML rendered directly via Mako\n", " \"\"\"\n", " context = self.get_context()\n", " html = self.system.render_template('{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context)\n", " return html\n", "\n", " def get_html_base(self):\n", " \"\"\"\n", " Gets the HTML associated with the current child task\n", " Input: None\n", " Output: Child task HTML\n", " \"\"\"\n", " self.update_task_states()\n", " html = self.current_task.get_html(self.system)\n", " return_html = html\n", " try:\n", " #Without try except block, get this error:\n", " # File \"/home/vik/mitx_all/mitx/common/lib/xmodule/xmodule/x_module.py\", line 263, in rewrite_content_links\n", " # if link.startswith(XASSET_SRCREF_PREFIX):\n", " # Placing try except so that if the error is fixed, this code will start working again.\n", " return_html = rewrite_links(html, self.rewrite_content_links)\n", " except Exception:\n", " pass\n", " return return_html\n", "\n", " def get_current_attributes(self, task_number):\n", " \"\"\"\n", " Gets the min and max score to attempt attributes of the specified task.\n", " Input: The number of the task.\n", " Output: The minimum and maximum scores needed to move on to the specified task.\n", " \"\"\"\n", " task_xml = self.task_xml[task_number]\n", " etree_xml = etree.fromstring(task_xml)\n", " min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))\n", " max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))\n", " return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt}\n", "\n", " def get_last_response(self, task_number):\n", " \"\"\"\n", " Returns data associated with the specified task number, such as the last response, score, etc.\n", " Input: The number of the task.\n", " Output: A dictionary that contains information about the specified task.\n", " \"\"\"\n", " last_response = \"\"\n", " task_state = self.task_states[task_number]\n", " task_xml = self.task_xml[task_number]\n", " task_type = self.get_tag_name(task_xml)\n", "\n", " children = self.child_modules()\n", "\n", " task_descriptor = children['descriptors'][task_type](self.system)\n", " etree_xml = etree.fromstring(task_xml)\n", "\n", " min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0))\n", " max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score))\n", "\n", " task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system)\n", " task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor,\n", " self.static_data, instance_state=task_state)\n", " last_response = task.latest_answer()\n", " last_score = task.latest_score()\n", " all_scores = task.all_scores()\n", " last_post_assessment = task.latest_post_assessment(self.system)\n", " last_post_feedback = \"\"\n", " feedback_dicts = [{}]\n", " grader_ids = [0]\n", " submission_ids = [0]\n", " if task_type == \"openended\":\n", " last_post_assessment = task.latest_post_assessment(self.system, short_feedback=False, join_feedback=False)\n", " if isinstance(last_post_assessment, list):\n", " eval_list = []\n", " for i in xrange(0, len(last_post_assessment)):\n", " eval_list.append(task.format_feedback_with_evaluation(self.system, last_post_assessment[i]))\n", " last_post_evaluation = \"\".join(eval_list)\n", " else:\n", " 
last_post_evaluation = task.format_feedback_with_evaluation(self.system, last_post_assessment)\n", " last_post_assessment = last_post_evaluation\n", " try:\n", " rubric_data = task._parse_score_msg(task.child_history[-1].get('post_assessment', \"\"), self.system)\n", " except Exception:\n", " log.debug(\"Could not parse rubric data from child history. \"\n", " \"Likely we have not yet initialized a previous step, so this is perfectly fine.\")\n", " rubric_data = {}\n", " rubric_scores = rubric_data.get('rubric_scores')\n", " grader_types = rubric_data.get('grader_types')\n", " feedback_items = rubric_data.get('feedback_items')\n", " feedback_dicts = rubric_data.get('feedback_dicts')\n", " grader_ids = rubric_data.get('grader_ids')\n", " submission_ids = rubric_data.get('submission_ids')\n", " elif task_type == \"selfassessment\":\n", " rubric_scores = last_post_assessment\n", " grader_types = ['SA']\n", " feedback_items = ['']\n", " last_post_assessment = \"\"\n", " last_correctness = task.is_last_response_correct()\n", " max_score = task.max_score()\n", " state = task.child_state\n", " if task_type in HUMAN_TASK_TYPE:\n", " human_task_name = HUMAN_TASK_TYPE[task_type]\n", " else:\n", " human_task_name = task_type\n", "\n", " if state in task.HUMAN_NAMES:\n", " human_state = task.HUMAN_NAMES[state]\n", " else:\n", " human_state = state\n", " if grader_types is not None and len(grader_types) > 0:\n", " grader_type = grader_types[0]\n", " else:\n", " grader_type = \"IN\"\n", "\n", " if grader_type in HUMAN_GRADER_TYPE:\n", " human_grader_name = HUMAN_GRADER_TYPE[grader_type]\n", " else:\n", " human_grader_name = grader_type\n", "\n", " last_response_dict = {\n", " 'response': last_response,\n", " 'score': last_score,\n", " 'all_scores': all_scores,\n", " 'post_assessment': last_post_assessment,\n", " 'type': task_type,\n", " 'max_score': max_score,\n", " 'state': state,\n", " 'human_state': human_state,\n", " 'human_task': human_task_name,\n", " 'correct': last_correctness,\n", " 'min_score_to_attempt': min_score_to_attempt,\n", " 'max_score_to_attempt': max_score_to_attempt,\n", " 'rubric_scores': rubric_scores,\n", " 'grader_types': grader_types,\n", " 'feedback_items': feedback_items,\n", " 'grader_type': grader_type,\n", " 'human_grader_type': human_grader_name,\n", " 'feedback_dicts': feedback_dicts,\n", " 'grader_ids': grader_ids,\n", " 'submission_ids': submission_ids,\n", " }\n", " return last_response_dict\n", "\n", " def update_task_states(self):\n", " \"\"\"\n", " Updates the task state of the combined open ended module with the task state of the current child module.\n", " Input: None\n", " Output: boolean indicating whether or not the task state changed.\n", " \"\"\"\n", " changed = False\n", " if not self.ready_to_reset:\n", " self.task_states[self.current_task_number] = self.current_task.get_instance_state()\n", " current_task_state = json.loads(self.task_states[self.current_task_number])\n", " if current_task_state['child_state'] == self.DONE:\n", " self.current_task_number += 1\n", " if self.current_task_number >= (len(self.task_xml)):\n", " self.state = self.DONE\n", " self.current_task_number = len(self.task_xml) - 1\n", " else:\n", " self.state = self.INITIAL\n", " changed = True\n", " self.setup_next_task()\n", " return changed\n", "\n", " def update_task_states_ajax(self, return_html):\n", " \"\"\"\n", " Runs the update task states function for ajax calls. 
Currently the same as update_task_states\n", " Input: The html returned by the handle_ajax function of the child\n", " Output: New html that should be rendered\n", " \"\"\"\n", " changed = self.update_task_states()\n", " if changed:\n", " #return_html=self.get_html()\n", " pass\n", " return return_html\n", "\n", " def get_rubric(self, get):\n", " \"\"\"\n", " Gets the results of a given grader via ajax.\n", " Input: AJAX get dictionary\n", " Output: Dictionary to be rendered via ajax that contains the result html.\n", " \"\"\"\n", " all_responses = []\n", " loop_up_to_task = self.current_task_number + 1\n", " for i in xrange(0, loop_up_to_task):\n", " all_responses.append(self.get_last_response(i))\n", " rubric_scores = [all_responses[i]['rubric_scores'] for i in xrange(0, len(all_responses)) if\n", " len(all_responses[i]['rubric_scores']) > 0 and all_responses[i]['grader_types'][\n", " 0] in HUMAN_GRADER_TYPE.keys()]\n", " grader_types = [all_responses[i]['grader_types'] for i in xrange(0, len(all_responses)) if\n", " len(all_responses[i]['grader_types']) > 0 and all_responses[i]['grader_types'][\n", " 0] in HUMAN_GRADER_TYPE.keys()]\n", " feedback_items = [all_responses[i]['feedback_items'] for i in xrange(0, len(all_responses)) if\n", " len(all_responses[i]['feedback_items']) > 0 and all_responses[i]['grader_types'][\n", " 0] in HUMAN_GRADER_TYPE.keys()]\n", " rubric_html = self.rubric_renderer.render_combined_rubric(stringify_children(self.static_data['rubric']),\n", " rubric_scores,\n", " grader_types, feedback_items)\n", "\n", " response_dict = all_responses[-1]\n", " context = {\n", " 'results': rubric_html,\n", " 'task_name': 'Scored Rubric',\n", " 'class_name': 'combined-rubric-container'\n", " }\n", " html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)\n", " return {'html': html, 'success': True}\n", "\n", " def get_legend(self, get):\n", " \"\"\"\n", " Gets the results of a given grader via ajax.\n", " Input: AJAX get dictionary\n", " Output: Dictionary to be rendered via ajax that contains the result html.\n", " \"\"\"\n", " context = {\n", " 'legend_list': LEGEND_LIST,\n", " }\n", " html = self.system.render_template('{0}/combined_open_ended_legend.html'.format(self.TEMPLATE_DIR), context)\n", " return {'html': html, 'success': True}\n", "\n", " def get_results(self, get):\n", " \"\"\"\n", " Gets the results of a given grader via ajax.\n", " Input: AJAX get dictionary\n", " Output: Dictionary to be rendered via ajax that contains the result html.\n", " \"\"\"\n", " self.update_task_states()\n", " loop_up_to_task = self.current_task_number + 1\n", " all_responses = []\n", " for i in xrange(0, loop_up_to_task):\n", " all_responses.append(self.get_last_response(i))\n", " context_list = []\n", " for ri in all_responses:\n", " for i in xrange(0, len(ri['rubric_scores'])):\n", " feedback = ri['feedback_dicts'][i].get('feedback', '')\n", " rubric_data = self.rubric_renderer.render_rubric(stringify_children(self.static_data['rubric']),\n", " ri['rubric_scores'][i])\n", " if rubric_data['success']:\n", " rubric_html = rubric_data['html']\n", " else:\n", " rubric_html = ''\n", " context = {\n", " 'rubric_html': rubric_html,\n", " 'grader_type': ri['grader_type'],\n", " 'feedback': feedback,\n", " 'grader_id': ri['grader_ids'][i],\n", " 'submission_id': ri['submission_ids'][i],\n", " }\n", " context_list.append(context)\n", " feedback_table = self.system.render_template('{0}/open_ended_result_table.html'.format(self.TEMPLATE_DIR), 
{\n", " 'context_list': context_list,\n", " 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,\n", " 'human_grader_types': HUMAN_GRADER_TYPE,\n", " 'rows': 50,\n", " 'cols': 50,\n", " })\n", " context = {\n", " 'results': feedback_table,\n", " 'task_name': \"Feedback\",\n", " 'class_name': \"result-container\",\n", " }\n", " html = self.system.render_template('{0}/combined_open_ended_results.html'.format(self.TEMPLATE_DIR), context)\n", " return {'html': html, 'success': True}\n", "\n", " def get_status_ajax(self, get):\n", " \"\"\"\n", " Gets the results of a given grader via ajax.\n", " Input: AJAX get dictionary\n", " Output: Dictionary to be rendered via ajax that contains the result html.\n", " \"\"\"\n", " html = self.get_status(True)\n", " return {'html': html, 'success': True}\n", "\n", " def handle_ajax(self, dispatch, get):\n", " \"\"\"\n", " This is called by courseware.module_render, to handle an AJAX call.\n", " \"get\" is request.POST.\n", "\n", " Returns a json dictionary:\n", " { 'progress_changed' : True/False,\n", " 'progress': 'none'/'in_progress'/'done',\n", " <other request-specific values here > }\n", " \"\"\"\n", "\n", " handlers = {\n", " 'next_problem': self.next_problem,\n", " 'reset': self.reset,\n", " 'get_results': self.get_results,\n", " 'get_combined_rubric': self.get_rubric,\n", " 'get_status': self.get_status_ajax,\n", " 'get_legend': self.get_legend,\n", " }\n", "\n", " if dispatch not in handlers:\n", " return_html = self.current_task.handle_ajax(dispatch, get, self.system)\n", " return self.update_task_states_ajax(return_html)\n", "\n", " d = handlers[dispatch](get)\n", " return json.dumps(d, cls=ComplexEncoder)\n", "\n", " def next_problem(self, get):\n", " \"\"\"\n", " Called via ajax to advance to the next problem.\n", " Input: AJAX get request.\n", " Output: Dictionary to be rendered\n", " \"\"\"\n", " self.update_task_states()\n", " return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.ready_to_reset}\n", "\n", " def reset(self, get):\n", " \"\"\"\n", " If resetting is allowed, reset the state of the combined open ended module.\n", " Input: AJAX get dictionary\n", " Output: AJAX dictionary to tbe rendered\n", " \"\"\"\n", " if self.state != self.DONE:\n", " if not self.ready_to_reset:\n", " return self.out_of_sync_error(get)\n", "\n", " if self.student_attempts > self.attempts:\n", " return {\n", " 'success': False,\n", " #This is a student_facing_error\n", " 'error': (\n", " 'You have attempted this question {0} times. '\n", " 'You are only allowed to attempt it {1} times.'\n", " ).format(self.student_attempts, self.attempts)\n", " }\n", " self.state = self.INITIAL\n", " self.ready_to_reset = False\n", " for i in xrange(0, len(self.task_xml)):\n", " self.current_task_number = i\n", " self.setup_next_task(reset=True)\n", " self.current_task.reset(self.system)\n", " self.task_states[self.current_task_number] = self.current_task.get_instance_state()\n", " self.current_task_number = 0\n", " self.ready_to_reset = False\n", " self.setup_next_task()\n", " return {'success': True, 'html': self.get_html_nonsystem()}\n", "\n", " def get_instance_state(self):\n", " \"\"\"\n", " Returns the current instance state. 
The module can be recreated from the instance state.\n", " Input: None\n", " Output: A dictionary containing the instance state.\n", " \"\"\"\n", "\n", " state = {\n", " 'version': self.STATE_VERSION,\n", " 'current_task_number': self.current_task_number,\n", " 'state': self.state,\n", " 'task_states': self.task_states,\n", " 'student_attempts': self.student_attempts,\n", " 'ready_to_reset': self.ready_to_reset,\n", " }\n", "\n", " return json.dumps(state)\n", "\n", " def get_status(self, render_via_ajax):\n", " \"\"\"\n", " Gets the status panel to be displayed at the top right.\n", " Input: None\n", " Output: The status html to be rendered\n", " \"\"\"\n", " status = []\n", " for i in xrange(0, self.current_task_number + 1):\n", " task_data = self.get_last_response(i)\n", " task_data.update({'task_number': i + 1})\n", " status.append(task_data)\n", "\n", " context = {\n", " 'status_list': status,\n", " 'grader_type_image_dict': GRADER_TYPE_IMAGE_DICT,\n", " 'legend_list': LEGEND_LIST,\n", " 'render_via_ajax': render_via_ajax,\n", " }\n", " status_html = self.system.render_template(\"{0}/combined_open_ended_status.html\".format(self.TEMPLATE_DIR),\n", " context)\n", "\n", " return status_html\n", "\n", " def check_if_done_and_scored(self):\n", " \"\"\"\n", " Checks if the object is currently in a finished state (either student didn't meet criteria to move\n", " to next step, in which case they are in the allow_reset state, or they are done with the question\n", " entirely, in which case they will be in the self.DONE state), and if it is scored or not.\n", " @return: Boolean corresponding to the above.\n", " \"\"\"\n", " return (self.state == self.DONE or self.ready_to_reset) and self.is_scored\n", "\n", " def get_score(self):\n", " \"\"\"\n", " Score the student received on the problem, or None if there is no\n", " score.\n", "\n", " Returns:\n", " dictionary\n", " {'score': integer, from 0 to get_max_score(),\n", " 'total': get_max_score()}\n", " \"\"\"\n", " max_score = None\n", " score = None\n", " if self.is_scored and self.weight is not None:\n", " #Finds the maximum score of all student attempts and keeps it.\n", " score_mat = []\n", " for i in xrange(0, len(self.task_states)):\n", " #For each task, extract all student scores on that task (each attempt for each task)\n", " last_response = self.get_last_response(i)\n", " max_score = last_response.get('max_score', None)\n", " score = last_response.get('all_scores', None)\n", " if score is not None:\n", " #Convert none scores and weight scores properly\n", " for z in xrange(0, len(score)):\n", " if score[z] is None:\n", " score[z] = 0\n", " score[z] *= float(self.weight)\n", " score_mat.append(score)\n", "\n", " if len(score_mat) > 0:\n", " #Currently, assume that the final step is the correct one, and that those are the final scores.\n", " #This will change in the future, which is why the machinery above exists to extract all scores on all steps\n", " #TODO: better final score handling.\n", " scores = score_mat[-1]\n", " score = max(scores)\n", " else:\n", " score = 0\n", "\n", " if max_score is not None:\n", " #Weight the max score if it is not None\n", " max_score *= float(self.weight)\n", " else:\n", " #Without a max_score, we cannot have a score!\n", " score = None\n", "\n", " score_dict = {\n", " 'score': score,\n", " 'total': max_score,\n", " }\n", "\n", " return score_dict\n", "\n", " def max_score(self):\n", " ''' Maximum score. 
Two notes:\n", "\n", " * This is generic; in abstract, a problem could be 3/5 points on one\n", " randomization, and 5/7 on another\n", " '''\n", " max_score = None\n", " if self.check_if_done_and_scored():\n", " last_response = self.get_last_response(self.current_task_number)\n", " max_score = last_response['max_score']\n", " return max_score\n", "\n", " def get_progress(self):\n", " ''' Return a progress.Progress object that represents how far the\n", " student has gone in this module. Must be implemented to get correct\n", " progress tracking behavior in nesting modules like sequence and\n", " vertical.\n", "\n", " If this module has no notion of progress, return None.\n", " '''\n", " progress_object = Progress(self.current_task_number, len(self.task_xml))\n", "\n", " return progress_object\n", "\n", " def out_of_sync_error(self, get, msg=''):\n", " \"\"\"\n", " return dict out-of-sync error message, and also log.\n", " \"\"\"\n", " #This is a dev_facing_error\n", " log.warning(\"Combined module state out sync. state: %r, get: %r. %s\",\n", " self.state, get, msg)\n", " #This is a student_facing_error\n", " return {'success': False,\n", " 'error': 'The problem state got out-of-sync. Please try reloading the page.'}\n", "\n", "\n", "class CombinedOpenEndedV1Descriptor():\n", " \"\"\"\n", " Module for adding combined open ended questions\n", " \"\"\"\n", " mako_template = \"widgets/html-edit.html\"\n", " module_class = CombinedOpenEndedV1Module\n", " filename_extension = \"xml\"\n", "\n", " stores_state = True\n", " has_score = True\n", " template_dir_name = \"combinedopenended\"\n", "\n", " def __init__(self, system):\n", " self.system = system\n", "\n", " @classmethod\n", " def definition_from_xml(cls, xml_object, system):\n", " \"\"\"\n", " Pull out the individual tasks, the rubric, and the prompt, and parse\n", "\n", " Returns:\n", " {\n", " 'rubric': 'some-html',\n", " 'prompt': 'some-html',\n", " 'task_xml': dictionary of xml strings,\n", " }\n", " \"\"\"\n", " expected_children = ['task', 'rubric', 'prompt']\n", " for child in expected_children:\n", " if len(xml_object.xpath(child)) == 0:\n", " #This is a staff_facing_error\n", " raise ValueError(\n", " \"Combined Open Ended definition must include at least one '{0}' tag. Contact the learning sciences group for assistance. {1}\".format(\n", " child, xml_object))\n", "\n", " def parse_task(k):\n", " \"\"\"Assumes that xml_object has child k\"\"\"\n", " return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]\n", "\n", " def parse(k):\n", " \"\"\"Assumes that xml_object has child k\"\"\"\n", " return xml_object.xpath(k)[0]\n", "\n", " return {'task_xml': parse_task('task'), 'prompt': parse('prompt'), 'rubric': parse('rubric')}\n", "\n", " def definition_to_xml(self, resource_fs):\n", " '''Return an xml element representing this definition.'''\n", " elt = etree.Element('combinedopenended')\n", "\n", " def add_child(k):\n", " child_str = '<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])\n", " child_node = etree.fromstring(child_str)\n", " elt.append(child_node)\n", "\n", " for child in ['task']:\n", " add_child(child)\n", "\n", " return elt\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0.012987012987012988, 0, 0, 0.01694915254237288, 0.038461538461538464, 0, 0, 0.018518518518518517, 0.038461538461538464, 0, 0, 0.016666666666666666, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0.04, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.00909090909090909, 0, 0.008403361344537815, 0.008403361344537815, 0, 0, 0.011111111111111112, 0.010869565217391304, 0, 0.008620689655172414, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085, 0, 0, 0, 0.009900990099009901, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0.012345679012345678, 0.011904761904761904, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0.013513513513513514, 0, 0, 0, 0.01818181818181818, 0, 0.017241379310344827, 0, 0.017543859649122806, 0, 0, 0, 0, 0, 0.021052631578947368, 0, 0, 0.011627906976744186, 0.008849557522123894, 0.008849557522123894, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0.008333333333333333, 0, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0.010638297872340425, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.008771929824561403, 0, 0, 0.00980392156862745, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010526315789473684, 0, 0.023809523809523808, 0, 0, 0.020833333333333332, 0.02, 0.008928571428571428, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0.010638297872340425, 0.010416666666666666, 0.009523809523809525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0.009615384615384616, 0, 0, 0, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01818181818181818, 0.008333333333333333, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0.011904761904761904, 0.01020408163265306, 0.009259259259259259, 0, 0, 0, 0.009708737864077669, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0.01020408163265306, 0, 0.011627906976744186, 0.009259259259259259, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008403361344537815, 0, 0, 0, 0.008849557522123894, 0, 0, 0.009009009009009009, 0, 0, 0.008620689655172414, 0, 0, 0.009259259259259259, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008771929824561403, 0, 0, 0, 0, 0, 0.010416666666666666, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0.009900990099009901, 0.009433962264150943, 0, 0.010101010101010102, 0.009615384615384616, 0, 0.009708737864077669, 0.009259259259259259, 0, 0.008771929824561403, 0.012345679012345678, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0.00847457627118644, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0.008547008547008548, 0, 0, 
0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008547008547008548, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00847457627118644, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009615384615384616, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.020833333333333332, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008695652173913044, 0, 0, 0, 0, 0, 0, 0.009345794392523364, 0.009433962264150943, 0.01020408163265306, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013333333333333334, 0, 0, 0.019801980198019802, 0, 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.016129032258064516, 0.019230769230769232, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0, 0, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0.025, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0.006493506493506494, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0, 0, 0, 0.00980392156862745, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0 ]
875
0.001996
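(The avg_score value above, 0.001996, appears to be the arithmetic mean of this row's scores sequence, with num_lines, 875, matching the length of the texts and scores sequences. A minimal Python sketch of that assumed relationship follows; recompute_row_stats is an illustrative helper name, not part of the dataset.)

def recompute_row_stats(texts, scores):
    # Assumption: num_lines == len(texts) == len(scores),
    # and avg_score is the plain mean of the per-line scores.
    assert len(texts) == len(scores)
    num_lines = len(scores)
    avg_score = sum(scores) / num_lines if num_lines else 0.0
    return num_lines, avg_score

(For the record ending here this should return (875, ~0.001996); a mismatch would mean the assumed derivation of avg_score is wrong.)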
#!/usr/bin/env python # -*- coding: utf-8 -*- """ """ from __future__ import print_function """Main lib file""" AMINOACID_CODES = ["ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY", "HIS", "ILE", "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL"] RES = ['DA', 'DG', 'DT', 'DC'] RES += ['A', 'G', 'U', 'C'] RESS = ['A', 'C', 'G', 'U', 'ADE', 'CYT', 'GUA', 'URY', 'URI', 'U34', 'U31', 'C31', '4SU', 'H2U', 'QUO', 'G7M', '5MU', '5MC', 'PSU', '2MG', '1MG', '1MA', 'M2G', '5BU', 'FHU', 'FMU', 'IU', 'OMG', 'OMC', 'OMU', 'A2M', 'A23', 'CCC', 'I'] + ['RC', 'RU', 'RA', 'RG', 'RT'] #DNA = ['DA', 'DG', 'DT', 'DC'] #RNA = ['A', 'G', 'U', 'C'] IONS = ['NA', 'MG', 'MN'] HYDROGEN_NAMES = ["H", "H5'", "H5''", "H4'", "H3'", "H2'", "HO2'", "H1'", "H3", "H5", "H6", "H5T", "H41", "1H5'", "2H5'", "HO2'", "1H4", "2H4", "1H2", "2H2", "H1", "H8", "H2", "1H6", "2H6", "HO5'", "H21", "H22", "H61", "H62", "H42", "HO3'", "1H2'", "2HO'", "HO'2", "H2'1" , "HO'2", "HO'2", "H2", "H2'1", "H1", "H2", "1H5*","2H5*", "H4*", "H3*", "H1*", "1H2*", "2HO*", "1H2", "2H2", "1H4", "2H4", "1H6", "2H6", "H1", "H2", "H3", "H5", "H6", "H8", "H5'1", "H5'2", "H3T"] import os import sys from collections import OrderedDict import re import string ignore_op3 = False def get_version(currfn='', verbose=False): #dupa """Get version of the tool based on state of the git repository. Return version. If currfn is empty, then the path is '.'. Hmm.. I think it will work. We will see. The version is not printed! https://github.com/m4rx9/curr_version/""" from subprocess import getoutput if currfn == '': path = '.' else: path = os.path.dirname(currfn) if verbose: print('get_version::path', path) if os.path.islink(currfn):#path + os.sep + os.path.basename(__file__)): path = os.path.dirname(os.readlink(path + os.sep + os.path.basename(currfn))) if not path: path = '.' 
if verbose: print('get_version::path2', path) curr_path = os.getcwd() os.chdir(os.path.abspath(path)) version = getoutput('git describe --long --tags --dirty --always') os.chdir(curr_path) if version.find('not found')>-1: return ' unknown' # > install git to get versioning based on git' else: return version class StrucFile: """StrucFile""" def __init__(self, fn): self.fn = fn self.report = [] self.report.append('The RNAStrucFile report: %s ' % fn) self.mol2_format = False self.lines = [] lines = open(fn).read().strip().split('\n') has_many_models = False for l in lines: # multi-models pdb files if l.startswith('MODEL'): has_many_models = True if l.startswith('ENDMDL'): break if l.startswith('ATOM') or l.startswith('HETATM') or l.startswith('TER') or l.startswith('END'): self.lines.append(l.strip()) if l.startswith("@<TRIPOS>"): self.mol2_format = True self.report.append('This is mol2 format') self.res = self.get_resn_uniq() def is_it_pdb(self): """Return True if the files is in PDB format.""" if len(self.lines): return True else: return False def is_mol2(self): """Return True if is_mol2 based on the presence of ```@<TRIPOS>```.""" return self.mol2_format def decap_gtp(self): lines = [] for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : if l[12:16].strip() in ['PG', 'O1G', 'O2G', 'O3G', 'O3B', 'PB','O1B','O2B', 'O3A']: continue if l[12:16].strip() == 'PA': l = l.replace('PA', 'P ') if l[12:16].strip() == 'O1A': l = l.replace('O1A', 'O1P') if l[12:16].strip() == 'O2A': l = l.replace('O2A', 'O2P') if l[17:20].strip() == 'GTP': l = l[:17] + ' G' + l[20:] l = l.replace('HETATM', 'ATOM ') lines.append(l) self.lines = lines def is_amber_like(self): """Use self.lines and check if there is XX line """ for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : rn = l[17:20] if rn in ['RU5', 'RC5', 'RA5', 'RT5', 'RG5']: self.report.append('This is amber-like format') return True return False def mol2toPDB(self, outfn=""): try: import pybel except ImportError: print('pybel is needed for mol2 to pdb convertion') #sys.exit(1) sys.exit(0) if not outfn: outfn = self.fn.replace('.mol2', '.pdb') for mol in pybel.readfile("mol2", self.fn): mol.write("pdb", outfn, overwrite=True) print('outfn: ', outfn) self.report.append(' Converted from mol2 to PDB') return outfn def get_no_lines(self): return len(self.lines) def get_text(self, add_end=True): txt = '' for l in self.lines: if l.startswith('END'): continue # skip end txt += l.strip() + '\n' if add_end: if not l.startswith('END'): txt += 'END' return txt.strip() def get_chain(self, chain_id='A'): txt = '' for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : if l[21] == chain_id: txt += l.strip() + '\n' txt += 'TER' return txt def get_resn_uniq(self): res = set() for l in self.lines: r = l[17:20].strip().upper() res.add(r) return res def check_res_if_std_na(self): wrong = [] for r in self.res: if r not in RES: wrong.append(r) return wrong def get_seq(self): """ You get `chains` such as: OrderedDict([('A', {'header': 'A:1-47', 'seq': 'CGUGGUUAGGGCCACGUUAAAUAGUUGCUUAAGCCCUAAGCGUUGAU'}), ('B', {'header': 'B:48-58', 'seq': 'AUCAGGUGCAA'})])""" seq = '' curri = int(self.lines[0][22:26]) seq = self.lines[0][19] chains = OrderedDict() curri = -100000000000000 #ugly chain_prev = None for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : resi = int(l[22:26]) if curri != resi: resname = l[17:20].strip() if len(resname) == 'GTP': # DG -> g GTP resname = 'g' if len(resname) > 1: # DG -> g GTP 
resname = resname[-1].lower() seq += resname chain_curr = l[21] if chain_prev != chain_curr and chain_prev: chains[chain_prev]['header'] += '-' + str(resi_prev) if chain_curr in chains: chains[chain_curr]['seq'] += resname else: chains[chain_curr] = dict() chains[chain_curr]['header'] = chain_curr + ':' + str(resi)#resi_prev) chains[chain_curr]['seq'] = resname resi_prev = resi chain_prev = chain_curr curri = resi chains[chain_prev]['header'] += '-' + str(resi_prev) seq = '' for c in list(chains.keys()): seq += '> ' + os.path.basename(self.fn) + ' ' + chains[c]['header'] + '\n' seq += chains[c]['seq'] + '\n' return seq.strip() def get_info_chains(self): """return A:3-21 B:22-32 """ seq = '' curri = int(self.lines[0][22:26]) seq = self.lines[0][19] chains = OrderedDict() curri = -100000000000000 #ugly chain_prev = None for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : resi = int(l[22:26]) if curri != resi: resname = l[17:20].strip() if len(resname) == 'GTP': # DG -> g GTP resname = 'g' if len(resname) > 1: # DG -> g GTP resname = resname[-1].lower() seq += resname chain_curr = l[21] if chain_prev != chain_curr and chain_prev: chains[chain_prev]['header'] += '-' + str(resi_prev) if chain_curr in chains: chains[chain_curr]['seq'] += resname else: chains[chain_curr] = dict() chains[chain_curr]['header'] = chain_curr + ':' + str(resi)#resi_prev) chains[chain_curr]['seq'] = resname resi_prev = resi chain_prev = chain_curr curri = resi chains[chain_prev]['header'] += '-' + str(resi_prev) seq = '' for c in list(chains.keys()): seq += chains[c]['header'].replace(':', ' ').replace('-', ' ') + ' ' return seq.strip() def detect_file_format(self): pass def detect_molecule_type(self): aa = [] na = [] for r in self.res: if r in AMINOACID_CODES: aa.append(r) if r in RESS: na.append(r) aa = float(len(aa)) / len(self.res) na = float(len(na)) / len(self.res) if aa == 0 and na == 0: return 'error' if aa > na: return '>protein< vs na', aa, na else: return 'protein vs >na<', aa, na def get_head(self): return '\n'.join(self.lines[:5]) def get_tail(self): return '\n'.join(self.lines[-5:]) def get_preview(self): t = '\n'.join(self.lines[:5]) t += '\n-------------------------------------------------------------------\n' t += '\n'.join(self.lines[-5:]) return t def remove_hydrogen(self): lines = [] for l in self.lines: if l[77:79].strip() == 'H': continue if l[12:16].strip() in HYDROGEN_NAMES: #if l[12:16].strip().startswith('H'): continue else: #print l[12:16] lines.append(l) self.lines = lines def remove_water(self): """Remove HOH and TIP3""" lines = [] for l in self.lines: if l[17:21].strip() in ['HOH', 'TIP3', 'WAT']: continue else: lines.append(l) self.lines = lines def remove_ion(self): """ TER 1025 U A 47 HETATM 1026 MG MG A 101 42.664 34.395 50.249 1.00 70.99 MG HETATM 1027 MG MG A 201 47.865 33.919 48.090 1.00 67.09 MG """ lines = [] for l in self.lines: element = l[76:78].strip().upper() element2 = l[17:20].strip().upper() if element in IONS: continue if element2 in IONS: continue else: lines.append(l) self.lines = lines def fixU__to__U(self): lines = [] for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : rn = l[17:20] rn = rn.replace('G ', ' G') rn = rn.replace('U ', ' U') rn = rn.replace('C ', ' C') rn = rn.replace('A ', ' A') l = l[:16] + ' ' + rn + ' ' + l[21:] #print l.strip() #print l2 #l = l.replace(' U ', ' U ') #l = l.replace(' G ', ' G ') #l = l.replace(' A ', ' A ') #l = l.replace(' C ', ' C ') lines.append(l) print('fixU__to__U OK') 
self.report.append(' Fix: U__ -> __U') self.lines = lines def resn_as_dna(self): lines = [] for l in self.lines: if l.startswith('ATOM') or l.startswith('HETATM') : #print l nl = l.replace( 'DA5', ' DA') # RA should be the last!!!! nl = nl.replace('DA3', ' DA') nl = nl.replace(' DA', ' DA') nl = nl.replace(' rA', ' DA') nl = nl.replace('DC5', ' DC') nl = nl.replace('DC3', ' DC') nl = nl.replace(' DC', ' DC') nl = nl.replace(' rC', ' DC') nl = nl.replace('DG5', ' DG') nl = nl.replace('DG3', ' DG') nl = nl.replace(' DG', ' DG') nl = nl.replace(' rG', ' DG') nl = nl.replace('DU5', ' DU') nl = nl.replace('DU3', ' DU') nl = nl.replace(' DU', ' DU') nl = nl.replace(' rU', ' DU') nl = nl.replace('DT5', ' DT') nl = nl.replace('DT3', ' DT') nl = nl.replace(' DT', ' DT') nl = nl.replace(' rT', ' DT') nl = nl.replace('C5M', 'C7 ') if l[17:20].strip() == 'G': nl = nl[:17] + ' DG' + nl[20:] if l[17:20].strip() == 'C': nl = nl[:17] + ' DC' + nl[20:] if l[17:20].strip() == 'T': nl = nl[:17] + ' DT' + nl[20:] if l[17:20].strip() == 'U': nl = nl[:17] + ' DU' + nl[20:] if l[17:20].strip() == 'A': nl = nl[:17] + ' DA' + nl[20:] lines.append(nl) if l.startswith("END") or l.startswith("TER"): lines.append(l) print('resn_as_dna') self.report.append(' resn_as_dna') self.lines = lines def fix_O_in_UC(self): """.. warning: remove RU names before using this function""" lines = [] for l in self.lines: #if l[12:16].strip() in #if l[12:16].strip().startswith('H'): nl = l.replace('O U', 'O2 U') nl =nl.replace('O C', 'O2 C') lines.append(nl) self.lines = lines def fix_op_atoms(self): lines = [] for l in self.lines: nl = l.replace('*', '\'') nl = nl.replace('O1P', 'OP1') nl = nl.replace('O2P', 'OP2') nl = nl.replace('O3P', 'OP3') lines.append(nl) self.lines = lines def get_report(self): return '\n'.join(self.report) def is_rna(self): wrong = [] for r in self.res: if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']: if r not in wrong_res: wrong_res.append(r) return wrong_res def check_res_if_std_dna(self): wrong_res = [] for r in self.res: if r.upper().strip() in ['A', 'T', 'C', 'G']: if r not in wrong_res: wrong_res.append(r) return wrong_res def check_res_if_supid_rna(self): wrong_res = [] for r in self.res: if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']: if r not in wrong_res: wrong_res.append(r) return wrong_res def is_rna(self): for r in self.res: if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']: if r not in wrong_res: wrong_res.append(r) return wrong_res def renum_atoms(self): """Renum atoms, from 1 to X for line; ATOM/HETATM""" lines = [] c = 1 for l in self.lines: l = l[:6] + str(c).rjust(5) + l[11:] c += 1 lines.append(l) self.lines = lines def fix_resn(self): lines = [] for l in self.lines: nl = l.replace( 'RA5', ' A') # RA should be the last!!!! 
nl = nl.replace('RA3', ' A') nl = nl.replace('ADE', ' A') nl = nl.replace(' RA', ' A') nl = nl.replace(' rA', ' A') nl = nl.replace('RC5', ' C') nl = nl.replace('RC3', ' C') nl = nl.replace('CYT', ' C') nl = nl.replace(' RC', ' C') nl = nl.replace(' rC', ' C') nl = nl.replace('RG5', ' G') nl = nl.replace('RG3', ' G') nl = nl.replace('GUA', ' G') nl = nl.replace(' RG', ' G') nl = nl.replace(' rG', ' G') nl = nl.replace('RU5', ' U') nl = nl.replace('RU3', ' U') nl = nl.replace('URA', ' U') nl = nl.replace(' RU', ' U') nl = nl.replace(' rU', ' U') nl = nl.replace('RT5', ' T') nl = nl.replace('RT3', ' T') nl = nl.replace('THY', ' T') nl = nl.replace(' RT', ' T') nl = nl.replace(' rT', ' T') lines.append(nl) self.lines = lines def check_res_if_std_prot(self): wrong = [] for r in self.res: if r not in AMINOACID_CODES: wrong.append(r) return wrong def write(self, outfn,v=True): """Write ```self.lines``` to a file (and END file")""" f = open(outfn, 'w') for l in self.lines: f.write(l + '\n') if not l.startswith('END'): f.write('END') f.close() if v: print('Write %s' % outfn) def get_rnapuzzle_ready(self, renumber_residues=True): """Get rnapuzzle ready structure. Submission format @http://ahsoka.u-strasbg.fr/rnapuzzles/ Does: - keep only given atoms, - renumber residues from 1, if renumber_residues=True (by default) """ try: from Bio import PDB from Bio.PDB import PDBIO except: sys.exit('Error: Install biopython to use this function (pip biopython)') import copy G_ATOMS = ['P', 'OP1', 'OP2', 'O5\'', 'C5\'', 'C4\'', 'O4\'', 'C3\'', 'O3\'', 'C2\'', 'O2\'', 'C1\'', 'N9', 'C8', 'N7', 'C5', 'C6', 'O6', 'N1', 'C2', 'N2', 'N3', 'C4'] A_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4".split() U_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 O4 C5 C6".split() C_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 N4 C5 C6".split() ftmp = '/tmp/out.pdb' self.write(ftmp,v=False) parser = PDB.PDBParser() struct = parser.get_structure('', ftmp) model = struct[0] s2 = PDB.Structure.Structure(struct.id) m2 = PDB.Model.Model(model.id) chains2 = [] missing = [] for chain in model.get_list(): res = [] for r in chain: res.append(r) res = copy.copy(res) c2 = PDB.Chain.Chain(chain.id) c = 1 # new chain, goes from 1 !!! 
for r in res: # hack for amber/qrna r.resname = r.resname.strip() if r.resname == 'RC3': r.resname = 'C' if r.resname == 'RU3': r.resname = 'U' if r.resname == 'RG3': r.resname = 'G' if r.resname == 'RA3': r.resname = 'A' if r.resname == 'C3': r.resname = 'C' if r.resname == 'U3': r.resname = 'U' if r.resname == 'G3': r.resname = 'G' if r.resname == 'A3': r.resname = 'A' if r.resname == 'RC5': r.resname = 'C' if r.resname == 'RU5': r.resname = 'U' if r.resname == 'RG5': r.resname = 'G' if r.resname == 'RA5': r.resname = 'A' if r.resname == 'C5': r.resname = 'C' if r.resname == 'U5': r.resname = 'U' if r.resname == 'G5': r.resname = 'G' if r.resname == 'A5': r.resname = 'A' if r.resname.strip() == 'RC': r.resname = 'C' if r.resname.strip() == 'RU': r.resname = 'U' if r.resname.strip() == 'RG': r.resname = 'G' if r.resname.strip() == 'RA': r.resname = 'A' r2 = PDB.Residue.Residue(r.id, r.resname.strip(), r.segid) if renumber_residues: r2.id = (r2.id[0], c, r2.id[2]) ## renumber residues if str(r.get_resname()).strip() == "G": for an in G_ATOMS: try: r2.add(r[an]) except KeyError: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "A": for an in A_ATOMS: try: r2.add(r[an]) except KeyError: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "C": for an in C_ATOMS: try: r2.add(r[an]) except: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "U": for an in U_ATOMS: try: r2.add(r[an]) except KeyError: #print 'Missing:', an, r,' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) c += 1 chains2.append(c2) io = PDBIO() s2.add(m2) for chain2 in chains2: m2.add(chain2) #print c2 #print m2 io.set_structure(s2) #fout = fn.replace('.pdb', '_fx.pdb') fout = '/tmp/outout.pdb' # hack io.save(fout) if missing: print('REMARK 000 Missing atoms:') for i in missing: print('REMARK 000 +', i[0], i[1], i[2], 'residue #', i[3]) #raise Exception('Missing atoms in %s' % self.fn) s = StrucFile(fout) self.lines = s.lines def get_simrna_ready(self, renumber_residues=True): """Get simrna_ready .. - take only first model, - renumber residues if renumber_residues=True .. 
warning:: requires: Biopython""" try: from Bio import PDB from Bio.PDB import PDBIO except: sys.exit('Error: Install biopython to use this function (pip biopython)') import warnings warnings.filterwarnings('ignore', '.*Invalid or missing.*',) warnings.filterwarnings('ignore', '.*with given element *',) import copy G_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 O6 N1 C2 N2 N3 C4".split() A_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4".split() U_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 O4 C5 C6".split() C_ATOMS = "P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 N4 C5 C6".split() ftmp = '/tmp/out.pdb' self.write(ftmp,v=False) parser = PDB.PDBParser() struct = parser.get_structure('', ftmp) model = struct[0] s2 = PDB.Structure.Structure(struct.id) m2 = PDB.Model.Model(model.id) chains2 = [] missing = [] for chain in model.get_list(): res = [] for r in chain: res.append(r) res = copy.copy(res) c2 = PDB.Chain.Chain(chain.id) c = 1 # new chain, goes from 1 if renumber True for r in res: # hack for amber/qrna r.resname = r.resname.strip() if r.resname == 'RC3': r.resname = 'C' if r.resname == 'RU3': r.resname = 'U' if r.resname == 'RG3': r.resname = 'G' if r.resname == 'RA3': r.resname = 'A' if r.resname == 'C3': r.resname = 'C' if r.resname == 'U3': r.resname = 'U' if r.resname == 'G3': r.resname = 'G' if r.resname == 'A3': r.resname = 'A' if r.resname == 'RC5': r.resname = 'C' if r.resname == 'RU5': r.resname = 'U' if r.resname == 'RG5': r.resname = 'G' if r.resname == 'RA5': r.resname = 'A' if r.resname == 'C5': r.resname = 'C' if r.resname == 'U5': r.resname = 'U' if r.resname == 'G5': r.resname = 'G' if r.resname == 'A5': r.resname = 'A' if r.resname.strip() == 'RC': r.resname = 'C' if r.resname.strip() == 'RU': r.resname = 'U' if r.resname.strip() == 'RG': r.resname = 'G' if r.resname.strip() == 'RA': r.resname = 'A' r2 = PDB.Residue.Residue(r.id, r.resname.strip(), r.segid) if renumber_residues: r2.id = (r2.id[0], c, r2.id[2]) ## renumber residues if c == 1: p_missing = True #if p_missing: # try: # x = r["O5'"] # x.id = ' P' # x.name = ' P' # x.fullname = ' P' # print "REMARK 000 FIX O5' -> P fix in chain ", chain.id # except: # pass for a in r: if a.id == 'P': p_missing = False if p_missing: currfn = __file__ if currfn == '': path = '.' 
else: path = os.path.dirname(currfn) if os.path.islink(currfn):#path + os.sep + os.path.basename(__file__)): path = os.path.dirname(os.readlink(path + os.sep + os.path.basename(currfn))) po3_struc = PDB.PDBParser().get_structure('', path + '/data/PO3_inner.pdb') po3 = [po3_atom for po3_atom in po3_struc[0].get_residues()][0] r_atoms = [r["O4'"], r["C4'"], r["C3'"]] po3_atoms = [po3["O4'"], po3["C4'"], po3["C3'"]] sup = PDB.Superimposer() sup.set_atoms(r_atoms, po3_atoms) rms = round(sup.rms, 3) sup.apply( po3_struc.get_atoms() ) # to all atoms of po3 r.add( po3['P']) r.add( po3['OP1']) r.add( po3['OP2']) try: r.add( po3["O5'"]) except: del r["O5'"] r.add( po3["O5'"]) p_missing = False # off this function # save it #io = PDB.PDBIO() #io.set_structure( po3_struc ) #io.save("po3.pdb") if str(r.get_resname()).strip() == "G": for an in G_ATOMS: if c == 1 and ignore_op3: if an in ['P', 'OP1', 'OP2']: continue try: if c == 1 and an == "O5'" and p_missing: r2.add(x) else: r2.add(r[an]) except KeyError: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "A": for an in A_ATOMS: if c == 1 and ignore_op3: if an in ['P', 'OP1', 'OP2']: continue try: if c == 1 and an == "O5'" and p_missing: r2.add(x) else: r2.add(r[an]) except KeyError: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "C": for an in C_ATOMS: if c == 1 and ignore_op3: if an in ['P', 'OP1', 'OP2']: continue try: if c == 1 and an == "O5'" and p_missing: r2.add(x) else: r2.add(r[an]) except: #print 'Missing:', an, r, ' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) elif str(r.get_resname()).strip() == "U": for an in U_ATOMS: if c == 1 and ignore_op3: if an in ['P', 'OP1', 'OP2']: continue try: if c == 1 and an == "O5'" and p_missing: r2.add(x) else: r2.add(r[an]) except KeyError: #print 'Missing:', an, r,' new resi', c missing.append([an, chain.id, r, c]) c2.add(r2) c += 1 chains2.append(c2) io = PDBIO() s2.add(m2) for chain2 in chains2: m2.add(chain2) #print c2 #print m2 io.set_structure(s2) #fout = fn.replace('.pdb', '_fx.pdb') fout = '/tmp/outout.pdb' # hack io.save(fout) if missing: print('REMARK 000 Missing atoms:') for i in missing: print('REMARK 000 +', i[0], i[1], i[2], 'residue #', i[3]) #raise Exception('Missing atoms in %s' % self.fn) s = StrucFile(fout) self.lines = s.lines def edit_occupancy_of_pdb(txt, pdb, pdb_out,v=False): """Make all atoms 1 (flexi) and then set occupancy 0 for seletected atoms. Return False if error. 
True if OK """ struc = PDB.PDBParser().get_structure('struc', pdb) txt = txt.replace(' ','') if v:print(txt) l = re.split('[,:;]', txt) if v:print(l) for s in struc: for c in s: for r in c: for a in r: a.set_occupancy(1) # make it flaxi for i in l: # ['A', '1-10', '15', '25-30', 'B', '1-10'] if i in string.ascii_letters: if v:print('chain', i) chain_curr = i continue if i.find('-') > -1: start, ends = i.split('-') if start > ends: print('Error: range start > end ' + i, file=sys.stderr) return False index = list(range(int(start), int(ends)+1)) else: index=[int(i)] for i in index: # change b_factor try: atoms = struc[0][chain_curr][i] except KeyError: if i == chain_curr: print('Error: Chain ' + chain_curr + ' not found in the PDB structure', file=sys.stderr) else: print('Error: Residue ' + chain_curr + ':' + str(i) + ' found in the PDB structure', file=sys.stderr) return False for a in atoms: a.set_occupancy(0) io = PDBIO() io.set_structure(struc) io.save(pdb_out) print('Saved ', pdb_out) return True # main if '__main__' == __name__: fn = 'input/image' print('fn:', fn) struc = StrucFile(fn) print(' pdb?:', struc.is_it_pdb()) print(' # atoms:', struc.get_no_lines()) fn = 'input/na.pdb' s = StrucFile(fn) print(s.detect_molecule_type()) #res = get_all_res(na) #print 'what is?', what_is(res) #print res print('non standard:', s.check_res_if_std_na()) print('is protein:', s.detect_molecule_type()) fn = 'input/prot.pdb' s = StrucFile(fn) print('non standard:', s.check_res_if_std_prot()) print('is protein:', s.detect_molecule_type()) fn = 'input/rna-ru.pdb' s = StrucFile(fn) print('non standard:', s.check_res_if_supid_rna()) print('is protein:', s.detect_molecule_type()) fn = 'input/na_highAtomNum.pdb' print(fn) s = StrucFile(fn) s.renum_atoms() s.write('output/na_highAtomNum.pdb') fn = 'input/na_solvet_old_format.pdb' print(fn) s = StrucFile(fn) s.fix_op_atoms() s.remove_hydrogen() s.remove_ion() s.remove_water() s.write('output/na_solvet_old_format.pdb') fn = 'input/na_solvet_old_format.pdb' print(fn) s = StrucFile(fn) s.fix_resn() s.remove_hydrogen() s.remove_ion() s.remove_water() s.write('output/na_solvet_old_format.pdb') #fn = 'input/na_solvet_old_format__.pdb' #s = StrucFile(fn) #s.fix_resn() #s.remove_hydrogen() #s.remove_ion() #s.remove_water() #s.renum_atoms() #s.fix_op_atoms() #s.write('output/na_solvet_old_format__.pdb') fn = 'input/1xjr.pdb' s.fix_resn() s.remove_hydrogen() s.remove_ion() s.remove_water() s.renum_atoms() s.fix_op_atoms() s.write('output/1xjr.pdb') fn = 'input/decoy0165_amb.pdb' print(fn) s = StrucFile(fn) s.fix_resn() s.remove_hydrogen() s.remove_ion() s.remove_water() s.renum_atoms() s.fix_O_in_UC() s.fix_op_atoms() s.write('output/decoy0165_amb_clx.pdb') fn = 'input/farna.pdb' print(fn) s = StrucFile(fn) s.fix_resn() s.remove_hydrogen() s.remove_ion() s.remove_water() s.fix_op_atoms() s.renum_atoms() s.write('output/farna.pdb') fn = 'input/farna.pdb' print(fn) r = StrucFile(fn) print(r.is_mol2()) if True: print('================================================') print("input/1xjr_clx_fChimera_noIncludeNumbers.mol2") r = StrucFile("input/1xjr_clx_fChimera_noIncludeNumbers.mol2") print(r.is_mol2()) r.mol2toPDB('/tmp/x.pdb') r = StrucFile('/tmp/x.pdb') print(r.get_report()) r.fix_resn() r.remove_hydrogen() r.remove_ion() r.remove_water() r.fix_op_atoms() r.renum_atoms() r.fixU__to__U() r.write("output/1xjr_clx_fChimera_noIncludeNumbers.mol2") if True: r = StrucFile("input/2du3_prot_bound.mol2") print(r.is_mol2()) outfn = r.mol2toPDB() print(r.get_report()) 
print('================================================') fn = "input/3e5fA-nogtp_processed_zephyr.pdb" r = StrucFile(fn) print(r.is_mol2()) #outfn = r.mol2toPDB() print(r.is_amber_like()) print(r.get_report()) print(r.get_preview()) r.fix_resn() print(r.get_preview()) r.remove_hydrogen() r.remove_ion() r.remove_water() #renum_atoms(t, t) #fix_O_in_UC(t, t) #fix_op_atoms(t, t) r.write('output/3e5fA-nogtp_processed_zephyr.pdb') print() fn = "input/1xjr_clx_charmm.pdb" print(fn) s = StrucFile(fn) s.fix_resn() s.remove_hydrogen() s.remove_ion() s.remove_water() s.write('output/1xjr_clx_charmm.pdb') #renum_atoms(t, t) #fix_O_in_UC(t, t) #fix_op_atoms(t, t) print() fn = "input/dna_fconvpdb_charmm22.pdb" print(fn) r = StrucFile(fn) r.get_preview() r.resn_as_dna() r.remove_hydrogen() r.remove_ion() r.remove_water() r.fix_resn() print(r.get_head()) print(r.get_tail()) print(r.get_preview()) r.write("output/dna_fconvpdb_charmm22.pdb") print() fn = "input/1a9l_NMR_1_2_models.pdb" print(fn) r = StrucFile(fn) r.write("output/1a9l_NMR_1_2_models_lib.pdb") #r.get_text() # get #1 model
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "\"\"\"\n", "from __future__ import print_function\n", "\"\"\"Main lib file\"\"\"\n", "\n", "AMINOACID_CODES = [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLU\", \"GLN\", \"GLY\",\n", " \"HIS\", \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\",\n", " \"TRP\", \"TYR\", \"VAL\"]\n", "RES = ['DA', 'DG', 'DT', 'DC']\n", "RES += ['A', 'G', 'U', 'C']\n", "\n", "RESS = ['A', 'C', 'G', 'U', 'ADE', 'CYT', 'GUA', 'URY', 'URI', 'U34', 'U31', 'C31', '4SU', 'H2U', 'QUO', 'G7M', '5MU', '5MC', 'PSU', '2MG', '1MG', '1MA', 'M2G', '5BU', 'FHU', 'FMU', 'IU', 'OMG', 'OMC', 'OMU', 'A2M', 'A23', 'CCC', 'I'] + ['RC', 'RU', 'RA', 'RG', 'RT']\n", "#DNA = ['DA', 'DG', 'DT', 'DC']\n", "#RNA = ['A', 'G', 'U', 'C']\n", "IONS = ['NA', 'MG', 'MN']\n", "HYDROGEN_NAMES = [\"H\", \"H5'\", \"H5''\", \"H4'\", \"H3'\", \"H2'\", \"HO2'\", \"H1'\", \"H3\", \"H5\", \"H6\", \"H5T\", \"H41\", \"1H5'\", \n", " \"2H5'\", \"HO2'\", \"1H4\", \"2H4\", \"1H2\", \"2H2\", \"H1\", \"H8\", \"H2\", \"1H6\", \"2H6\",\n", " \"HO5'\", \"H21\", \"H22\", \"H61\", \"H62\", \"H42\", \"HO3'\", \"1H2'\", \"2HO'\", \"HO'2\", \"H2'1\" , \"HO'2\", \"HO'2\",\n", " \"H2\", \"H2'1\", \"H1\", \"H2\", \"1H5*\",\"2H5*\", \"H4*\", \"H3*\", \"H1*\", \"1H2*\", \"2HO*\", \"1H2\", \"2H2\", \"1H4\", \"2H4\", \"1H6\", \"2H6\", \"H1\", \"H2\", \"H3\", \"H5\", \"H6\", \"H8\", \"H5'1\", \"H5'2\", \"H3T\"]\n", "\n", "import os\n", "import sys\n", "from collections import OrderedDict\n", "import re\n", "import string\n", "\n", "ignore_op3 = False\n", "\n", "def get_version(currfn='', verbose=False): #dupa\n", " \"\"\"Get version of the tool based on state of the git repository.\n", " Return version. \n", " If currfn is empty, then the path is '.'. Hmm.. I think it will work. 
We will see.\n", " The version is not printed!\n", " https://github.com/m4rx9/curr_version/\"\"\"\n", " from subprocess import getoutput\n", "\n", " if currfn == '':\n", " path = '.'\n", " else:\n", " path = os.path.dirname(currfn)\n", " if verbose: print('get_version::path', path)\n", " if os.path.islink(currfn):#path + os.sep + os.path.basename(__file__)):\n", " path = os.path.dirname(os.readlink(path + os.sep + os.path.basename(currfn)))\n", " if not path: path = '.'\n", " if verbose: print('get_version::path2', path)\n", " curr_path = os.getcwd()\n", " os.chdir(os.path.abspath(path))\n", " version = getoutput('git describe --long --tags --dirty --always')\n", " os.chdir(curr_path)\n", " if version.find('not found')>-1:\n", " return ' unknown' # > install git to get versioning based on git'\n", " else:\n", " return version\n", "\n", "class StrucFile:\n", " \"\"\"StrucFile\"\"\"\n", " def __init__(self, fn):\n", " self.fn = fn\n", " \n", " self.report = []\n", " self.report.append('The RNAStrucFile report: %s ' % fn) \n", "\n", " self.mol2_format = False\n", "\n", " self.lines = []\n", " lines = open(fn).read().strip().split('\\n')\n", " has_many_models = False\n", " for l in lines:\n", " # multi-models pdb files\n", " if l.startswith('MODEL'):\n", " has_many_models = True\n", " if l.startswith('ENDMDL'):\n", " break\n", "\n", " if l.startswith('ATOM') or l.startswith('HETATM') or l.startswith('TER') or l.startswith('END'):\n", "\n", " self.lines.append(l.strip())\n", " if l.startswith(\"@<TRIPOS>\"):\n", " self.mol2_format = True\n", " self.report.append('This is mol2 format')\n", " \n", " self.res = self.get_resn_uniq()\n", "\n", " def is_it_pdb(self):\n", " \"\"\"Return True if the files is in PDB format.\"\"\"\n", " if len(self.lines):\n", " return True\n", " else:\n", " return False\n", "\n", " def is_mol2(self):\n", " \"\"\"Return True if is_mol2 based on the presence of ```@<TRIPOS>```.\"\"\"\n", " return self.mol2_format\n", "\n", " def decap_gtp(self):\n", " lines = []\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " if l[12:16].strip() in ['PG', 'O1G', 'O2G', 'O3G', 'O3B', 'PB','O1B','O2B', 'O3A']:\n", " continue\n", " if l[12:16].strip() == 'PA':\n", " l = l.replace('PA', 'P ')\n", " if l[12:16].strip() == 'O1A':\n", " l = l.replace('O1A', 'O1P')\n", " if l[12:16].strip() == 'O2A':\n", " l = l.replace('O2A', 'O2P')\n", " if l[17:20].strip() == 'GTP':\n", " l = l[:17] + ' G' + l[20:]\n", " l = l.replace('HETATM', 'ATOM ')\n", " lines.append(l)\n", " self.lines = lines\n", " \n", " def is_amber_like(self):\n", " \"\"\"Use self.lines and check if there is XX line\n", " \"\"\"\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " rn = l[17:20]\n", " if rn in ['RU5', 'RC5', 'RA5', 'RT5', 'RG5']:\n", " self.report.append('This is amber-like format')\n", " return True\n", " return False\n", "\n", " def mol2toPDB(self, outfn=\"\"):\n", " try:\n", " import pybel\n", " except ImportError:\n", " print('pybel is needed for mol2 to pdb convertion')\n", " #sys.exit(1)\n", " sys.exit(0)\n", "\n", " if not outfn:\n", " outfn = self.fn.replace('.mol2', '.pdb')\n", " \n", " for mol in pybel.readfile(\"mol2\", self.fn):\n", " mol.write(\"pdb\", outfn, overwrite=True)\n", "\n", " print('outfn: ', outfn)\n", " self.report.append(' Converted from mol2 to PDB')\n", " return outfn\n", "\n", " def get_no_lines(self):\n", " return len(self.lines)\n", "\n", " def get_text(self, add_end=True):\n", " txt = ''\n", " for l in 
self.lines:\n", " if l.startswith('END'):\n", " continue # skip end\n", " txt += l.strip() + '\\n'\n", " if add_end:\n", " if not l.startswith('END'):\n", " txt += 'END'\n", " return txt.strip()\n", "\n", " def get_chain(self, chain_id='A'):\n", " txt = ''\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " if l[21] == chain_id:\n", " txt += l.strip() + '\\n'\n", " txt += 'TER'\n", " return txt\n", "\n", " def get_resn_uniq(self):\n", " res = set()\n", " for l in self.lines:\n", " r = l[17:20].strip().upper()\n", " res.add(r)\n", " return res\n", "\n", " def check_res_if_std_na(self):\n", " wrong = []\n", " \n", " for r in self.res:\n", " if r not in RES:\n", " wrong.append(r)\n", " return wrong\n", "\n", " def get_seq(self):\n", " \"\"\"\n", " You get `chains` such as:\n", " OrderedDict([('A', {'header': 'A:1-47', 'seq': 'CGUGGUUAGGGCCACGUUAAAUAGUUGCUUAAGCCCUAAGCGUUGAU'}), ('B', {'header': 'B:48-58', 'seq': 'AUCAGGUGCAA'})])\"\"\"\n", " \n", " seq = ''\n", " curri = int(self.lines[0][22:26])\n", " seq = self.lines[0][19]\n", " chains = OrderedDict()\n", " curri = -100000000000000 #ugly\n", " chain_prev = None\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " resi = int(l[22:26])\n", " if curri != resi:\n", " resname = l[17:20].strip()\n", " if len(resname) == 'GTP': # DG -> g GTP\n", " resname = 'g'\n", " if len(resname) > 1: # DG -> g GTP\n", " resname = resname[-1].lower()\n", " seq += resname\n", " chain_curr = l[21]\n", " if chain_prev != chain_curr and chain_prev:\n", " chains[chain_prev]['header'] += '-' + str(resi_prev)\n", " if chain_curr in chains: \n", " chains[chain_curr]['seq'] += resname\n", " else:\n", " chains[chain_curr] = dict()\n", " chains[chain_curr]['header'] = chain_curr + ':' + str(resi)#resi_prev)\n", " chains[chain_curr]['seq'] = resname\n", " resi_prev = resi\n", " chain_prev = chain_curr\n", " curri = resi\n", " chains[chain_prev]['header'] += '-' + str(resi_prev)\n", " seq = ''\n", " for c in list(chains.keys()):\n", " seq += '> ' + os.path.basename(self.fn) + ' ' + chains[c]['header'] + '\\n'\n", " seq += chains[c]['seq'] + '\\n'\n", " return seq.strip()\n", "\n", " def get_info_chains(self):\n", " \"\"\"return A:3-21 B:22-32\n", " \"\"\"\n", " seq = ''\n", " curri = int(self.lines[0][22:26])\n", " seq = self.lines[0][19]\n", " chains = OrderedDict()\n", " curri = -100000000000000 #ugly\n", " chain_prev = None\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " resi = int(l[22:26])\n", " if curri != resi:\n", " resname = l[17:20].strip()\n", " if len(resname) == 'GTP': # DG -> g GTP\n", " resname = 'g'\n", " if len(resname) > 1: # DG -> g GTP\n", " resname = resname[-1].lower()\n", " seq += resname\n", " chain_curr = l[21]\n", " if chain_prev != chain_curr and chain_prev:\n", " chains[chain_prev]['header'] += '-' + str(resi_prev)\n", " if chain_curr in chains: \n", " chains[chain_curr]['seq'] += resname\n", " else:\n", " chains[chain_curr] = dict()\n", " chains[chain_curr]['header'] = chain_curr + ':' + str(resi)#resi_prev)\n", " chains[chain_curr]['seq'] = resname\n", " resi_prev = resi\n", " chain_prev = chain_curr\n", " curri = resi\n", " chains[chain_prev]['header'] += '-' + str(resi_prev)\n", " seq = ''\n", " for c in list(chains.keys()):\n", " seq += chains[c]['header'].replace(':', ' ').replace('-', ' ') + ' '\n", " return seq.strip()\n", "\n", " def detect_file_format(self):\n", " pass\n", " \n", " def detect_molecule_type(self):\n", " 
aa = []\n", " na = []\n", " for r in self.res:\n", " if r in AMINOACID_CODES:\n", " aa.append(r)\n", " if r in RESS:\n", " na.append(r) \n", "\n", " aa = float(len(aa)) / len(self.res)\n", " na = float(len(na)) / len(self.res)\n", "\n", " if aa == 0 and na == 0:\n", " return 'error'\n", " if aa > na:\n", " return '>protein< vs na', aa, na\n", " else:\n", " return 'protein vs >na<', aa, na\n", "\n", " def get_head(self):\n", " return '\\n'.join(self.lines[:5])\n", "\n", " def get_tail(self):\n", " return '\\n'.join(self.lines[-5:])\n", "\n", " def get_preview(self):\n", " t = '\\n'.join(self.lines[:5])\n", " t += '\\n-------------------------------------------------------------------\\n'\n", " t += '\\n'.join(self.lines[-5:])\n", " return t\n", "\n", " def remove_hydrogen(self):\n", " lines = []\n", " for l in self.lines:\n", " if l[77:79].strip() == 'H':\n", " continue\n", " if l[12:16].strip() in HYDROGEN_NAMES:\n", " #if l[12:16].strip().startswith('H'):\n", " continue\n", " else:\n", " #print l[12:16]\n", " lines.append(l)\n", " self.lines = lines\n", "\n", " def remove_water(self):\n", " \"\"\"Remove HOH and TIP3\"\"\"\n", " lines = []\n", " for l in self.lines:\n", " if l[17:21].strip() in ['HOH', 'TIP3', 'WAT']:\n", " continue\n", " else:\n", " lines.append(l)\n", " self.lines = lines\n", "\n", " def remove_ion(self):\n", " \"\"\"\n", " TER 1025 U A 47 \n", " HETATM 1026 MG MG A 101 42.664 34.395 50.249 1.00 70.99 MG \n", " HETATM 1027 MG MG A 201 47.865 33.919 48.090 1.00 67.09 MG \n", " \"\"\"\n", " lines = []\n", " for l in self.lines:\n", " element = l[76:78].strip().upper()\n", " element2 = l[17:20].strip().upper()\n", " if element in IONS:\n", " continue\n", " if element2 in IONS:\n", " continue\n", " else:\n", " lines.append(l)\n", " self.lines = lines\n", "\n", " def fixU__to__U(self):\n", " lines = []\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " rn = l[17:20]\n", " rn = rn.replace('G ', ' G')\n", " rn = rn.replace('U ', ' U')\n", " rn = rn.replace('C ', ' C')\n", " rn = rn.replace('A ', ' A')\n", " l = l[:16] + ' ' + rn + ' ' + l[21:]\n", " #print l.strip()\n", " #print l2\n", " #l = l.replace(' U ', ' U ')\n", " #l = l.replace(' G ', ' G ')\n", " #l = l.replace(' A ', ' A ')\n", " #l = l.replace(' C ', ' C ')\n", " lines.append(l)\n", " print('fixU__to__U OK')\n", " self.report.append(' Fix: U__ -> __U')\n", " self.lines = lines\n", "\n", " def resn_as_dna(self):\n", " lines = []\n", " for l in self.lines:\n", " if l.startswith('ATOM') or l.startswith('HETATM') :\n", " #print l\n", " nl = l.replace( 'DA5', ' DA') # RA should be the last!!!!\n", " nl = nl.replace('DA3', ' DA')\n", " nl = nl.replace(' DA', ' DA')\n", " nl = nl.replace(' rA', ' DA')\n", "\n", " nl = nl.replace('DC5', ' DC')\n", " nl = nl.replace('DC3', ' DC')\n", " nl = nl.replace(' DC', ' DC')\n", " nl = nl.replace(' rC', ' DC')\n", "\n", " nl = nl.replace('DG5', ' DG')\n", " nl = nl.replace('DG3', ' DG')\n", " nl = nl.replace(' DG', ' DG')\n", " nl = nl.replace(' rG', ' DG')\n", "\n", " nl = nl.replace('DU5', ' DU')\n", " nl = nl.replace('DU3', ' DU')\n", " nl = nl.replace(' DU', ' DU')\n", " nl = nl.replace(' rU', ' DU')\n", "\n", " nl = nl.replace('DT5', ' DT')\n", " nl = nl.replace('DT3', ' DT')\n", " nl = nl.replace(' DT', ' DT')\n", " nl = nl.replace(' rT', ' DT')\n", "\n", " nl = nl.replace('C5M', 'C7 ')\n", "\n", " if l[17:20].strip() == 'G':\n", " nl = nl[:17] + ' DG' + nl[20:]\n", "\n", " if l[17:20].strip() == 'C':\n", " nl = nl[:17] + ' DC' + 
nl[20:]\n", "\n", " if l[17:20].strip() == 'T':\n", " nl = nl[:17] + ' DT' + nl[20:]\n", "\n", " if l[17:20].strip() == 'U':\n", " nl = nl[:17] + ' DU' + nl[20:]\n", "\n", " if l[17:20].strip() == 'A':\n", " nl = nl[:17] + ' DA' + nl[20:]\n", "\n", " lines.append(nl)\n", " if l.startswith(\"END\") or l.startswith(\"TER\"):\n", " lines.append(l)\n", "\n", " print('resn_as_dna')\n", " self.report.append(' resn_as_dna')\n", " self.lines = lines\n", "\n", " def fix_O_in_UC(self):\n", " \"\"\".. warning: remove RU names before using this function\"\"\"\n", " lines = []\n", " for l in self.lines:\n", " #if l[12:16].strip() in \n", " #if l[12:16].strip().startswith('H'):\n", " nl = l.replace('O U',\n", " 'O2 U')\n", " nl =nl.replace('O C',\n", " 'O2 C')\n", " lines.append(nl)\n", " self.lines = lines\n", "\n", "\n", " def fix_op_atoms(self):\n", " lines = []\n", " for l in self.lines:\n", " nl = l.replace('*', '\\'')\n", " nl = nl.replace('O1P', 'OP1')\n", " nl = nl.replace('O2P', 'OP2')\n", " nl = nl.replace('O3P', 'OP3')\n", " lines.append(nl)\n", " self.lines = lines\n", "\n", " def get_report(self):\n", " return '\\n'.join(self.report)\n", "\n", "\n", " def is_rna(self):\n", " wrong = []\n", " for r in self.res:\n", " if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']:\n", " if r not in wrong_res:\n", " wrong_res.append(r)\n", " return wrong_res\n", "\n", " def check_res_if_std_dna(self):\n", " wrong_res = []\n", " for r in self.res:\n", " if r.upper().strip() in ['A', 'T', 'C', 'G']:\n", " if r not in wrong_res:\n", " wrong_res.append(r)\n", " return wrong_res\n", "\n", " def check_res_if_supid_rna(self):\n", " wrong_res = []\n", " for r in self.res:\n", " if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']:\n", " if r not in wrong_res:\n", " wrong_res.append(r)\n", " return wrong_res\n", "\n", " def is_rna(self):\n", " for r in self.res:\n", " if r.upper().strip() in ['RC', 'RU', 'RA', 'RG', 'RT']:\n", " if r not in wrong_res:\n", " wrong_res.append(r)\n", " return wrong_res\n", "\n", " def renum_atoms(self):\n", " \"\"\"Renum atoms, from 1 to X for line; ATOM/HETATM\"\"\"\n", " lines = []\n", " c = 1\n", " for l in self.lines:\n", " l = l[:6] + str(c).rjust(5) + l[11:]\n", " c += 1\n", " lines.append(l)\n", " self.lines = lines\n", "\n", " def fix_resn(self):\n", " lines = []\n", " for l in self.lines:\n", " nl = l.replace( 'RA5', ' A') # RA should be the last!!!!\n", " nl = nl.replace('RA3', ' A')\n", " nl = nl.replace('ADE', ' A')\n", " nl = nl.replace(' RA', ' A')\n", " nl = nl.replace(' rA', ' A')\n", "\n", " nl = nl.replace('RC5', ' C')\n", " nl = nl.replace('RC3', ' C')\n", " nl = nl.replace('CYT', ' C')\n", " nl = nl.replace(' RC', ' C')\n", " nl = nl.replace(' rC', ' C')\n", "\n", " nl = nl.replace('RG5', ' G')\n", " nl = nl.replace('RG3', ' G')\n", " nl = nl.replace('GUA', ' G')\n", " nl = nl.replace(' RG', ' G')\n", " nl = nl.replace(' rG', ' G')\n", "\n", " nl = nl.replace('RU5', ' U')\n", " nl = nl.replace('RU3', ' U')\n", " nl = nl.replace('URA', ' U') \n", " nl = nl.replace(' RU', ' U')\n", " nl = nl.replace(' rU', ' U')\n", "\n", " nl = nl.replace('RT5', ' T')\n", " nl = nl.replace('RT3', ' T')\n", " nl = nl.replace('THY', ' T') \n", " nl = nl.replace(' RT', ' T')\n", " nl = nl.replace(' rT', ' T')\n", " \n", " lines.append(nl)\n", " \n", " self.lines = lines\n", "\n", " def check_res_if_std_prot(self):\n", " wrong = []\n", " for r in self.res:\n", " if r not in AMINOACID_CODES:\n", " wrong.append(r)\n", " return wrong\n", "\n", "\n", " def write(self, 
outfn,v=True):\n", " \"\"\"Write ```self.lines``` to a file (and END file\")\"\"\"\n", " f = open(outfn, 'w')\n", " for l in self.lines:\n", " f.write(l + '\\n')\n", " if not l.startswith('END'):\n", " f.write('END')\n", " f.close()\n", " if v:\n", " print('Write %s' % outfn)\n", "\n", " def get_rnapuzzle_ready(self, renumber_residues=True):\n", " \"\"\"Get rnapuzzle ready structure.\n", " Submission format @http://ahsoka.u-strasbg.fr/rnapuzzles/\n", "\n", " Does:\n", " - keep only given atoms,\n", " - renumber residues from 1, if renumber_residues=True (by default)\n", " \"\"\"\n", " try:\n", " from Bio import PDB\n", " from Bio.PDB import PDBIO\n", " except:\n", " sys.exit('Error: Install biopython to use this function (pip biopython)')\n", "\n", " import copy\n", "\n", " G_ATOMS = ['P', 'OP1', 'OP2', 'O5\\'', 'C5\\'', 'C4\\'', 'O4\\'', 'C3\\'', 'O3\\'', 'C2\\'', 'O2\\'', 'C1\\'', 'N9', 'C8', 'N7', 'C5', 'C6', 'O6', 'N1', 'C2', 'N2', 'N3', 'C4']\n", " A_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4\".split()\n", " U_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 O4 C5 C6\".split()\n", " C_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 N4 C5 C6\".split()\n", "\n", " ftmp = '/tmp/out.pdb'\n", " self.write(ftmp,v=False)\n", "\n", " parser = PDB.PDBParser()\n", " struct = parser.get_structure('', ftmp)\n", " model = struct[0]\n", "\n", " s2 = PDB.Structure.Structure(struct.id)\n", " m2 = PDB.Model.Model(model.id)\n", "\n", " chains2 = []\n", "\n", " missing = []\n", " for chain in model.get_list():\n", " res = [] \n", " for r in chain:\n", " res.append(r)\n", "\n", " res = copy.copy(res)\n", "\n", " c2 = PDB.Chain.Chain(chain.id) \n", "\n", " c = 1 # new chain, goes from 1 !!!\n", " for r in res:\n", " # hack for amber/qrna\n", " r.resname = r.resname.strip()\n", " if r.resname == 'RC3': r.resname = 'C'\n", " if r.resname == 'RU3': r.resname = 'U'\n", " if r.resname == 'RG3': r.resname = 'G'\n", " if r.resname == 'RA3': r.resname = 'A'\n", "\n", " if r.resname == 'C3': r.resname = 'C'\n", " if r.resname == 'U3': r.resname = 'U'\n", " if r.resname == 'G3': r.resname = 'G'\n", " if r.resname == 'A3': r.resname = 'A'\n", "\n", " if r.resname == 'RC5': r.resname = 'C'\n", " if r.resname == 'RU5': r.resname = 'U'\n", " if r.resname == 'RG5': r.resname = 'G'\n", " if r.resname == 'RA5': r.resname = 'A'\n", "\n", " if r.resname == 'C5': r.resname = 'C'\n", " if r.resname == 'U5': r.resname = 'U'\n", " if r.resname == 'G5': r.resname = 'G'\n", " if r.resname == 'A5': r.resname = 'A'\n", "\n", " if r.resname.strip() == 'RC': r.resname = 'C'\n", " if r.resname.strip() == 'RU': r.resname = 'U'\n", " if r.resname.strip() == 'RG': r.resname = 'G'\n", " if r.resname.strip() == 'RA': r.resname = 'A'\n", "\n", " r2 = PDB.Residue.Residue(r.id, r.resname.strip(), r.segid)\n", " if renumber_residues:\n", " r2.id = (r2.id[0], c, r2.id[2]) ## renumber residues\n", " if str(r.get_resname()).strip() == \"G\":\n", "\n", " for an in G_ATOMS:\n", " try:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"A\":\n", " for an in A_ATOMS:\n", " try:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"C\":\n", " for an in C_ATOMS:\n", " 
try:\n", " r2.add(r[an])\n", " except:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"U\":\n", " for an in U_ATOMS:\n", " try:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r,' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " \n", " c2.add(r2)\n", " c += 1\n", " chains2.append(c2)\n", "\n", " io = PDBIO()\n", " s2.add(m2)\n", " for chain2 in chains2:\n", " m2.add(chain2) \n", " #print c2\n", " #print m2\n", " io.set_structure(s2)\n", " #fout = fn.replace('.pdb', '_fx.pdb')\n", " fout = '/tmp/outout.pdb' # hack\n", " io.save(fout)\n", " \n", " if missing:\n", " print('REMARK 000 Missing atoms:')\n", " for i in missing:\n", " print('REMARK 000 +', i[0], i[1], i[2], 'residue #', i[3])\n", " #raise Exception('Missing atoms in %s' % self.fn)\n", " s = StrucFile(fout)\n", " self.lines = s.lines\n", "\n", "\n", " def get_simrna_ready(self, renumber_residues=True):\n", " \"\"\"Get simrna_ready .. \n", "\n", " - take only first model,\n", " - renumber residues if renumber_residues=True\n", "\n", " .. warning:: requires: Biopython\"\"\"\n", " try:\n", " from Bio import PDB\n", " from Bio.PDB import PDBIO\n", " except:\n", " sys.exit('Error: Install biopython to use this function (pip biopython)')\n", "\n", " import warnings\n", " \n", " warnings.filterwarnings('ignore', '.*Invalid or missing.*',)\n", " warnings.filterwarnings('ignore', '.*with given element *',)\n", " \n", " import copy\n", "\n", " G_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 O6 N1 C2 N2 N3 C4\".split()\n", " A_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4\".split()\n", " U_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 O4 C5 C6\".split()\n", " C_ATOMS = \"P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 N4 C5 C6\".split()\n", "\n", " ftmp = '/tmp/out.pdb'\n", " self.write(ftmp,v=False)\n", "\n", " parser = PDB.PDBParser()\n", " struct = parser.get_structure('', ftmp)\n", " model = struct[0]\n", "\n", " s2 = PDB.Structure.Structure(struct.id)\n", " m2 = PDB.Model.Model(model.id)\n", "\n", " chains2 = []\n", "\n", " missing = []\n", " \n", " for chain in model.get_list():\n", " res = [] \n", " for r in chain:\n", " res.append(r)\n", "\n", " res = copy.copy(res)\n", "\n", " c2 = PDB.Chain.Chain(chain.id) \n", "\n", " c = 1 # new chain, goes from 1 if renumber True\n", " for r in res:\n", " # hack for amber/qrna\n", " r.resname = r.resname.strip()\n", " if r.resname == 'RC3': r.resname = 'C'\n", " if r.resname == 'RU3': r.resname = 'U'\n", " if r.resname == 'RG3': r.resname = 'G'\n", " if r.resname == 'RA3': r.resname = 'A'\n", "\n", " if r.resname == 'C3': r.resname = 'C'\n", " if r.resname == 'U3': r.resname = 'U'\n", " if r.resname == 'G3': r.resname = 'G'\n", " if r.resname == 'A3': r.resname = 'A'\n", "\n", " if r.resname == 'RC5': r.resname = 'C'\n", " if r.resname == 'RU5': r.resname = 'U'\n", " if r.resname == 'RG5': r.resname = 'G'\n", " if r.resname == 'RA5': r.resname = 'A'\n", "\n", " if r.resname == 'C5': r.resname = 'C'\n", " if r.resname == 'U5': r.resname = 'U'\n", " if r.resname == 'G5': r.resname = 'G'\n", " if r.resname == 'A5': r.resname = 'A'\n", "\n", " if r.resname.strip() == 'RC': r.resname = 'C'\n", " if r.resname.strip() == 'RU': r.resname = 'U'\n", " if r.resname.strip() == 'RG': r.resname = 'G'\n", " if r.resname.strip() == 'RA': r.resname = 'A'\n", "\n", " 
r2 = PDB.Residue.Residue(r.id, r.resname.strip(), r.segid)\n", " if renumber_residues:\n", " r2.id = (r2.id[0], c, r2.id[2]) ## renumber residues\n", " if c == 1:\n", " p_missing = True\n", " #if p_missing:\n", " # try:\n", " # x = r[\"O5'\"]\n", " # x.id = ' P'\n", " # x.name = ' P'\n", " # x.fullname = ' P'\n", " # print \"REMARK 000 FIX O5' -> P fix in chain \", chain.id\n", " # except:\n", " # pass\n", " for a in r:\n", " if a.id == 'P':\n", " p_missing = False\n", "\n", " if p_missing:\n", " currfn = __file__\n", " if currfn == '':\n", " path = '.'\n", " else:\n", " path = os.path.dirname(currfn)\n", " if os.path.islink(currfn):#path + os.sep + os.path.basename(__file__)):\n", " path = os.path.dirname(os.readlink(path + os.sep + os.path.basename(currfn)))\n", "\n", " po3_struc = PDB.PDBParser().get_structure('', path + '/data/PO3_inner.pdb') \n", " po3 = [po3_atom for po3_atom in po3_struc[0].get_residues()][0]\n", "\n", " r_atoms = [r[\"O4'\"], r[\"C4'\"], r[\"C3'\"]]\n", " po3_atoms = [po3[\"O4'\"], po3[\"C4'\"], po3[\"C3'\"]]\n", "\n", " sup = PDB.Superimposer()\n", " sup.set_atoms(r_atoms, po3_atoms)\n", " rms = round(sup.rms, 3)\n", "\n", " sup.apply( po3_struc.get_atoms() ) # to all atoms of po3\n", "\n", " r.add( po3['P'])\n", " r.add( po3['OP1'])\n", " r.add( po3['OP2'])\n", " try:\n", " r.add( po3[\"O5'\"]) \n", " except:\n", " del r[\"O5'\"] \n", " r.add( po3[\"O5'\"]) \n", "\n", " p_missing = False # off this function\n", "\n", " # save it\n", " #io = PDB.PDBIO()\n", " #io.set_structure( po3_struc )\n", " #io.save(\"po3.pdb\")\n", "\n", " if str(r.get_resname()).strip() == \"G\":\n", " for an in G_ATOMS:\n", " if c == 1 and ignore_op3:\n", " if an in ['P', 'OP1', 'OP2']:\n", " continue\n", " try:\n", " if c == 1 and an == \"O5'\" and p_missing:\n", " r2.add(x)\n", " else:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"A\":\n", " for an in A_ATOMS:\n", " if c == 1 and ignore_op3:\n", " if an in ['P', 'OP1', 'OP2']:\n", " continue\n", " try:\n", " if c == 1 and an == \"O5'\" and p_missing:\n", " r2.add(x)\n", " else:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"C\":\n", " for an in C_ATOMS:\n", " if c == 1 and ignore_op3:\n", " if an in ['P', 'OP1', 'OP2']:\n", " continue\n", " try:\n", " if c == 1 and an == \"O5'\" and p_missing:\n", " r2.add(x)\n", " else:\n", " r2.add(r[an])\n", " except:\n", " #print 'Missing:', an, r, ' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " elif str(r.get_resname()).strip() == \"U\":\n", " for an in U_ATOMS:\n", " if c == 1 and ignore_op3:\n", " if an in ['P', 'OP1', 'OP2']:\n", " continue\n", " try:\n", " if c == 1 and an == \"O5'\" and p_missing:\n", " r2.add(x)\n", " else:\n", " r2.add(r[an])\n", " except KeyError:\n", " #print 'Missing:', an, r,' new resi', c\n", " missing.append([an, chain.id, r, c])\n", " c2.add(r2)\n", "\n", " c += 1\n", " chains2.append(c2)\n", "\n", " io = PDBIO()\n", " s2.add(m2)\n", " for chain2 in chains2:\n", " m2.add(chain2) \n", " #print c2\n", " #print m2\n", " io.set_structure(s2)\n", " #fout = fn.replace('.pdb', '_fx.pdb')\n", " fout = '/tmp/outout.pdb' # hack\n", " io.save(fout)\n", " \n", " if missing:\n", " print('REMARK 000 Missing atoms:')\n", " for i in 
missing:\n", " print('REMARK 000 +', i[0], i[1], i[2], 'residue #', i[3])\n", " #raise Exception('Missing atoms in %s' % self.fn)\n", " s = StrucFile(fout)\n", " self.lines = s.lines\n", "\n", " def edit_occupancy_of_pdb(txt, pdb, pdb_out,v=False):\n", " \"\"\"Make all atoms 1 (flexi) and then set occupancy 0 for seletected atoms.\n", " Return False if error. True if OK\n", " \"\"\"\n", " struc = PDB.PDBParser().get_structure('struc', pdb)\n", "\n", " txt = txt.replace(' ','')\n", " if v:print(txt)\n", " l = re.split('[,:;]', txt)\n", " if v:print(l) \n", "\n", " for s in struc:\n", " for c in s:\n", " for r in c:\n", " for a in r:\n", " a.set_occupancy(1) # make it flaxi\n", "\n", " for i in l: # ['A', '1-10', '15', '25-30', 'B', '1-10']\n", "\n", " if i in string.ascii_letters:\n", " if v:print('chain', i)\n", " chain_curr = i\n", " continue\n", "\n", " if i.find('-') > -1:\n", " start, ends = i.split('-')\n", " if start > ends:\n", " print('Error: range start > end ' + i, file=sys.stderr)\n", " return False\n", " index = list(range(int(start), int(ends)+1))\n", " else:\n", " index=[int(i)]\n", "\n", " for i in index:\n", " # change b_factor\n", " try:\n", " atoms = struc[0][chain_curr][i]\n", " except KeyError:\n", " if i == chain_curr:\n", " print('Error: Chain ' + chain_curr + ' not found in the PDB structure', file=sys.stderr)\n", " else:\n", " print('Error: Residue ' + chain_curr + ':' + str(i) + ' found in the PDB structure', file=sys.stderr)\n", " return False\n", " for a in atoms:\n", " a.set_occupancy(0)\n", "\n", " io = PDBIO()\n", " io.set_structure(struc)\n", " io.save(pdb_out)\n", " print('Saved ', pdb_out)\n", " return True\n", "\n", "# main\n", "if '__main__' == __name__:\n", " fn = 'input/image'\n", " print('fn:', fn)\n", " struc = StrucFile(fn)\n", " print(' pdb?:', struc.is_it_pdb())\n", " print(' # atoms:', struc.get_no_lines())\n", "\n", " fn = 'input/na.pdb'\n", " s = StrucFile(fn)\n", " print(s.detect_molecule_type())\n", " #res = get_all_res(na)\n", " #print 'what is?', what_is(res)\n", " #print res\n", " print('non standard:', s.check_res_if_std_na())\n", " print('is protein:', s.detect_molecule_type())\n", "\n", " fn = 'input/prot.pdb'\n", " s = StrucFile(fn)\n", " print('non standard:', s.check_res_if_std_prot())\n", " print('is protein:', s.detect_molecule_type())\n", "\n", "\n", " fn = 'input/rna-ru.pdb'\n", " s = StrucFile(fn)\n", " print('non standard:', s.check_res_if_supid_rna())\n", " print('is protein:', s.detect_molecule_type())\n", "\n", " fn = 'input/na_highAtomNum.pdb'\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.renum_atoms()\n", " s.write('output/na_highAtomNum.pdb')\n", "\n", " fn = 'input/na_solvet_old_format.pdb'\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.fix_op_atoms()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.write('output/na_solvet_old_format.pdb')\n", "\n", " fn = 'input/na_solvet_old_format.pdb'\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.fix_resn()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.write('output/na_solvet_old_format.pdb')\n", "\n", " #fn = 'input/na_solvet_old_format__.pdb'\n", " #s = StrucFile(fn)\n", " #s.fix_resn()\n", " #s.remove_hydrogen()\n", " #s.remove_ion()\n", " #s.remove_water()\n", " #s.renum_atoms()\n", " #s.fix_op_atoms()\n", " #s.write('output/na_solvet_old_format__.pdb')\n", "\n", "\n", " fn = 'input/1xjr.pdb'\n", " s.fix_resn()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.renum_atoms()\n", " 
s.fix_op_atoms()\n", " s.write('output/1xjr.pdb')\n", "\n", " fn = 'input/decoy0165_amb.pdb'\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.fix_resn()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.renum_atoms()\n", " s.fix_O_in_UC()\n", " s.fix_op_atoms()\n", " s.write('output/decoy0165_amb_clx.pdb')\n", "\n", " fn = 'input/farna.pdb'\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.fix_resn()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.fix_op_atoms()\n", " s.renum_atoms()\n", " s.write('output/farna.pdb')\n", "\n", " fn = 'input/farna.pdb'\n", " print(fn)\n", "\n", " r = StrucFile(fn)\n", " print(r.is_mol2())\n", "\n", " if True:\n", " print('================================================')\n", " print(\"input/1xjr_clx_fChimera_noIncludeNumbers.mol2\")\n", " r = StrucFile(\"input/1xjr_clx_fChimera_noIncludeNumbers.mol2\")\n", " print(r.is_mol2())\n", " r.mol2toPDB('/tmp/x.pdb')\n", "\n", " r = StrucFile('/tmp/x.pdb')\n", " print(r.get_report())\n", " r.fix_resn()\n", " r.remove_hydrogen()\n", " r.remove_ion()\n", " r.remove_water()\n", " r.fix_op_atoms()\n", " r.renum_atoms()\n", " r.fixU__to__U()\n", " r.write(\"output/1xjr_clx_fChimera_noIncludeNumbers.mol2\")\n", "\n", " if True:\n", " r = StrucFile(\"input/2du3_prot_bound.mol2\")\n", " print(r.is_mol2())\n", " outfn = r.mol2toPDB()\n", " print(r.get_report())\n", "\n", " print('================================================')\n", " fn = \"input/3e5fA-nogtp_processed_zephyr.pdb\"\n", " r = StrucFile(fn)\n", " print(r.is_mol2())\n", " #outfn = r.mol2toPDB()\n", " print(r.is_amber_like())\n", " print(r.get_report())\n", "\n", " print(r.get_preview())\n", "\n", " r.fix_resn()\n", "\n", " print(r.get_preview())\n", "\n", " r.remove_hydrogen()\n", " r.remove_ion()\n", " r.remove_water()\n", " #renum_atoms(t, t)\n", " #fix_O_in_UC(t, t)\n", " #fix_op_atoms(t, t)\n", " r.write('output/3e5fA-nogtp_processed_zephyr.pdb')\n", "\n", " print()\n", " fn = \"input/1xjr_clx_charmm.pdb\"\n", " print(fn)\n", " s = StrucFile(fn)\n", " s.fix_resn()\n", " s.remove_hydrogen()\n", " s.remove_ion()\n", " s.remove_water()\n", " s.write('output/1xjr_clx_charmm.pdb')\n", "\n", " #renum_atoms(t, t)\n", " #fix_O_in_UC(t, t)\n", " #fix_op_atoms(t, t)\n", "\n", " print()\n", " fn = \"input/dna_fconvpdb_charmm22.pdb\"\n", " print(fn)\n", " r = StrucFile(fn)\n", " r.get_preview()\n", " r.resn_as_dna()\n", " r.remove_hydrogen()\n", " r.remove_ion()\n", " r.remove_water()\n", " r.fix_resn()\n", " print(r.get_head())\n", " print(r.get_tail())\n", " print(r.get_preview())\n", " r.write(\"output/dna_fconvpdb_charmm22.pdb\")\n", "\n", "\n", " print()\n", " fn = \"input/1a9l_NMR_1_2_models.pdb\"\n", " print(fn)\n", " r = StrucFile(fn)\n", " r.write(\"output/1a9l_NMR_1_2_models_lib.pdb\")\n", " #r.get_text() # get #1 model\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.013333333333333334, 0.030303030303030304, 0, 0, 0, 0.0037313432835820895, 0.03125, 0.03571428571428571, 0, 0.017391304347826087, 0.0196078431372549, 0.015873015873015872, 0.00975609756097561, 0, 0.1, 0.09090909090909091, 0.027777777777777776, 0.1, 0.07142857142857142, 0, 0, 0, 0.061224489795918366, 0, 0.047619047619047616, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0.02631578947368421, 0.011627906976744186, 0.03571428571428571, 0.02, 0, 0, 0, 0, 0.02702702702702703, 0.013513513513513514, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0.2, 0, 0.015384615384615385, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.015625, 0.03, 0, 0, 0.021739130434782608, 0, 0.020833333333333332, 0, 0.020833333333333332, 0, 0.020833333333333332, 0.018518518518518517, 0, 0, 0.1111111111111111, 0, 0, 0, 0.034482758620689655, 0.015625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.015625, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0, 0.006097560975609756, 0.1111111111111111, 0, 0, 0, 0, 0.05128205128205128, 0, 0.034482758620689655, 0.015625, 0, 0, 0, 0.016666666666666666, 0, 0.01818181818181818, 0, 0, 0, 0, 0, 0.01639344262295082, 0, 0, 0, 0.031578947368421054, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05128205128205128, 0, 0.034482758620689655, 0.015625, 0, 0, 0, 0.016666666666666666, 0, 0.01818181818181818, 0, 0, 0, 0, 0, 0.01639344262295082, 0, 0, 0, 0.031578947368421054, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0.04, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.05, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0.023529411764705882, 0.023809523809523808, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.015625, 0, 0, 0, 0, 0, 0.018867924528301886, 0.034482758620689655, 0.045454545454545456, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.015625, 0.04, 0.02702702702702703, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.05405405405405406, 0.02, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0.034482758620689655, 0.02857142857142857, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.022727272727272728, 0, 0, 0, 0, 0, 0.022727272727272728, 0, 0, 0.07692307692307693, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05714285714285714, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.011627906976744186, 0, 0, 0, 0.005681818181818182, 0.009615384615384616, 0.01020408163265306, 0.01020408163265306, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0, 0, 0, 0.0273972602739726, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0.047619047619047616, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0.05555555555555555, 0.05555555555555555, 0, 0.021739130434782608, 0.025, 0, 0.1111111111111111, 0, 0, 0, 0, 0.016129032258064516, 0, 0, 0, 0, 0.017543859649122806, 0.03125, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.011627906976744186, 0, 0, 0.1111111111111111, 0, 0, 0.1111111111111111, 0, 0, 0.009345794392523364, 0.009615384615384616, 0.01020408163265306, 0.01020408163265306, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0.01818181818181818, 0, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0.018518518518518517, 0, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0.016129032258064516, 0, 0, 0, 0.0273972602739726, 0, 0, 0.02857142857142857, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608, 0, 0, 0, 0, 0.03, 0.00909090909090909, 0, 0.01904761904761905, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0.047058823529411764, 0, 0.022222222222222223, 0.02127659574468085, 0.02127659574468085, 0, 0.038461538461538464, 0.027777777777777776, 0.021739130434782608, 0.038461538461538464, 0, 0.017241379310344827, 0, 0, 0.02631578947368421, 0.0196078431372549, 0.025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0.05555555555555555, 0.05555555555555555, 0, 0.021739130434782608, 0.025, 0, 0.1111111111111111, 0, 0, 0, 0, 0.016129032258064516, 0, 0, 0, 0.017241379310344827, 0.012048192771084338, 0, 0, 0, 0, 0.029411764705882353, 0.08333333333333333, 0.02857142857142857, 0.13043478260869565, 0, 0, 0, 0, 0, 0, 0, 0.015625, 0, 0, 0.05128205128205128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0, 0.007936507936507936, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0.027777777777777776, 0.06666666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0.043478260869565216, 0.05555555555555555, 0.04, 0.05, 0.045454545454545456, 0.047619047619047616, 0.045454545454545456, 0.02, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0.043478260869565216, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0.043478260869565216, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0, 0.030303030303030304 ]
1118
0.007457
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec import sys from _pydevd_bundle.pydevd_constants import IS_JYTHON from _pydev_imps import _pydev_thread as thread from _pydevd_bundle import pydevd_xml from _pydevd_bundle import pydevd_vars from _pydevd_bundle.pydevd_utils import * # @UnusedWildImport import traceback #======================================================================================================================= # Null #======================================================================================================================= class Null: """ Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205 """ def __init__(self, *args, **kwargs): return None def __call__(self, *args, **kwargs): return self def __getattr__(self, mname): return self def __setattr__(self, name, value): return self def __delattr__(self, name): return self def __repr__(self): return "<Null>" def __str__(self): return "Null" def __len__(self): return 0 def __getitem__(self): return self def __setitem__(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def __nonzero__(self): return 0 #======================================================================================================================= # BaseStdIn #======================================================================================================================= class BaseStdIn: def __init__(self, *args, **kwargs): try: self.encoding = sys.stdin.encoding except: #Not sure if it's available in all Python versions... pass def readline(self, *args, **kwargs): #sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything #This could happen if the user had done input('enter number).<-- upon entering this, that message would appear, #which is not something we want. return '\n' def isatty(self): return False #not really a file def write(self, *args, **kwargs): pass #not available StdIn (but it can be expected to be in the stream interface) def flush(self, *args, **kwargs): pass #not available StdIn (but it can be expected to be in the stream interface) def read(self, *args, **kwargs): #in the interactive interpreter, a read and a readline are the same. return self.readline() def close(self, *args, **kwargs): pass #expected in StdIn #======================================================================================================================= # StdIn #======================================================================================================================= class StdIn(BaseStdIn): ''' Object to be added to stdin (to emulate it as non-blocking while the next line arrives) ''' def __init__(self, interpreter, host, client_port): BaseStdIn.__init__(self) self.interpreter = interpreter self.client_port = client_port self.host = host def readline(self, *args, **kwargs): #Ok, callback into the client to get the new input try: server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port)) requested_input = server.RequestInput() if not requested_input: return '\n' #Yes, a readline must return something (otherwise we can get an EOFError on the input() call). 
return requested_input except: return '\n' class CodeFragment: def __init__(self, text, is_single_line=True): self.text = text self.is_single_line = is_single_line def append(self, code_fragment): self.text = self.text + "\n" + code_fragment.text if not code_fragment.is_single_line: self.is_single_line = False #======================================================================================================================= # BaseInterpreterInterface #======================================================================================================================= class BaseInterpreterInterface: def __init__(self, mainThread): self.mainThread = mainThread self.interruptable = False self.exec_queue = _queue.Queue(0) self.buffer = None def need_more_for_code(self, source): # PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations # Strangely even the IPython console is_complete said it was complete # even with a continuation char at the end. if source.endswith('\\'): return True if hasattr(self.interpreter, 'is_complete'): return not self.interpreter.is_complete(source) try: code = self.interpreter.compile(source, '<input>', 'exec') except (OverflowError, SyntaxError, ValueError): # Case 1 return False if code is None: # Case 2 return True # Case 3 return False def need_more(self, code_fragment): if self.buffer is None: self.buffer = code_fragment else: self.buffer.append(code_fragment) return self.need_more_for_code(self.buffer.text) def create_std_in(self): return StdIn(self, self.host, self.client_port) def add_exec(self, code_fragment): original_in = sys.stdin try: help = None if 'pydoc' in sys.modules: pydoc = sys.modules['pydoc'] #Don't import it if it still is not there. if hasattr(pydoc, 'help'): #You never know how will the API be changed, so, let's code defensively here help = pydoc.help if not hasattr(help, 'input'): help = None except: #Just ignore any error here pass more = False try: sys.stdin = self.create_std_in() try: if help is not None: #This will enable the help() function to work. try: try: help.input = sys.stdin except AttributeError: help._input = sys.stdin except: help = None if not self._input_error_printed: self._input_error_printed = True sys.stderr.write('\nError when trying to update pydoc.help.input\n') sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n') traceback.print_exc() try: self.start_exec() if hasattr(self, 'debugger'): from _pydevd_bundle import pydevd_tracing pydevd_tracing.SetTrace(self.debugger.trace_dispatch) more = self.do_add_exec(code_fragment) if hasattr(self, 'debugger'): from _pydevd_bundle import pydevd_tracing pydevd_tracing.SetTrace(None) self.finish_exec(more) finally: if help is not None: try: try: help.input = original_in except AttributeError: help._input = original_in except: pass finally: sys.stdin = original_in except SystemExit: raise except: traceback.print_exc() return more def do_add_exec(self, codeFragment): ''' Subclasses should override. @return: more (True if more input is needed to complete the statement and False if the statement is complete). ''' raise NotImplementedError() def get_namespace(self): ''' Subclasses should override. @return: dict with namespace. ''' raise NotImplementedError() def getDescription(self, text): try: obj = None if '.' 
not in text: try: obj = self.get_namespace()[text] except KeyError: return '' else: try: splitted = text.split('.') obj = self.get_namespace()[splitted[0]] for t in splitted[1:]: obj = getattr(obj, t) except: return '' if obj is not None: try: if sys.platform.startswith("java"): #Jython doc = obj.__doc__ if doc is not None: return doc from _pydev_bundle import _pydev_jy_imports_tipper is_method, infos = _pydev_jy_imports_tipper.ismethod(obj) ret = '' if is_method: for info in infos: ret += info.get_as_doc() return ret else: #Python and Iron Python import inspect #@UnresolvedImport doc = inspect.getdoc(obj) if doc is not None: return doc except: pass try: #if no attempt succeeded, try to return repr()... return repr(obj) except: try: #otherwise the class return str(obj.__class__) except: #if all fails, go to an empty string return '' except: traceback.print_exc() return '' def do_exec_code(self, code, is_single_line): try: code_fragment = CodeFragment(code, is_single_line) more = self.need_more(code_fragment) if not more: code_fragment = self.buffer self.buffer = None self.exec_queue.put(code_fragment) return more except: traceback.print_exc() return False def execLine(self, line): return self.do_exec_code(line, True) def execMultipleLines(self, lines): if IS_JYTHON: for line in lines.split('\n'): self.do_exec_code(line, True) else: return self.do_exec_code(lines, False) def interrupt(self): self.buffer = None # Also clear the buffer when it's interrupted. try: if self.interruptable: called = False try: # Fix for #PyDev-500: Console interrupt can't interrupt on sleep import os import signal if os.name == 'posix': # On Linux we can't interrupt 0 as in Windows because it's # actually owned by a process -- on the good side, signals # work much better on Linux! os.kill(os.getpid(), signal.SIGINT) called = True elif os.name == 'nt': # Stupid windows: sending a Ctrl+C to a process given its pid # is absurdly difficult. # There are utilities to make it work such as # http://www.latenighthacking.com/projects/2003/sendSignal/ # but fortunately for us, it seems Python does allow a CTRL_C_EVENT # for the current process in Windows if pid 0 is passed... if we needed # to send a signal to another process the approach would be # much more difficult. # Still, note that CTRL_C_EVENT is only Python 2.7 onwards... # Also, this doesn't seem to be documented anywhere!? (stumbled # upon it by chance after digging quite a lot). os.kill(0, signal.CTRL_C_EVENT) called = True except: # Many things to go wrong (from CTRL_C_EVENT not being there # to failing import signal)... if that's the case, ask for # forgiveness and go on to the approach which will interrupt # the main thread (but it'll only work when it's executing some Python # code -- not on sleep() for instance). 
pass if not called: if hasattr(thread, 'interrupt_main'): #Jython doesn't have it thread.interrupt_main() else: self.mainThread._thread.interrupt() #Jython return True except: traceback.print_exc() return False def close(self): sys.exit(0) def start_exec(self): self.interruptable = True def get_server(self): if getattr(self, 'host', None) is not None: return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port)) else: return None server = property(get_server) def finish_exec(self, more): self.interruptable = False server = self.get_server() if server is not None: return server.NotifyFinished(more) else: return True def getFrame(self): xml = "<xml>" xml += pydevd_xml.frame_vars_to_xml(self.get_namespace()) xml += "</xml>" return xml def getVariable(self, attributes): xml = "<xml>" valDict = pydevd_vars.resolve_var(self.get_namespace(), attributes) if valDict is None: valDict = {} keys = valDict.keys() for k in keys: xml += pydevd_vars.var_to_xml(valDict[k], to_string(k)) xml += "</xml>" return xml def getArray(self, attr, roffset, coffset, rows, cols, format): xml = "<xml>" name = attr.split("\t")[-1] array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace()) array, metaxml, r, c, f = pydevd_vars.array_to_meta_xml(array, name, format) xml += metaxml format = '%' + f if rows == -1 and cols == -1: rows = r cols = c xml += pydevd_vars.array_to_xml(array, roffset, coffset, rows, cols, format) xml += "</xml>" return xml def evaluate(self, expression): xml = "<xml>" result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace()) xml += pydevd_vars.var_to_xml(result, expression) xml += "</xml>" return xml def changeVariable(self, attr, value): def do_change_variable(): Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace()) # Important: it has to be really enabled in the main thread, so, schedule # it to run in the main thread. self.exec_queue.put(do_change_variable) def _findFrame(self, thread_id, frame_id): ''' Used to show console with variables connection. Always return a frame where the locals map to our internal namespace. ''' VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java if thread_id == VIRTUAL_CONSOLE_ID and frame_id == VIRTUAL_FRAME_ID: f = FakeFrame() f.f_globals = {} #As globals=locals here, let's simply let it empty (and save a bit of network traffic). f.f_locals = self.get_namespace() return f else: return self.orig_find_frame(thread_id, frame_id) def connectToDebugger(self, debuggerPort): ''' Used to show console with variables connection. Mainly, monkey-patches things in the debugger structure so that the debugger protocol works. 
''' def do_connect_to_debugger(): try: # Try to import the packages needed to attach the debugger import pydevd from _pydev_imps import _pydev_threading as threading except: # This happens on Jython embedded in host eclipse traceback.print_exc() sys.stderr.write('pydevd is not available, cannot connect\n',) from _pydev_bundle import pydev_localhost threading.currentThread().__pydevd_id__ = "console_main" self.orig_find_frame = pydevd_vars.find_frame pydevd_vars.find_frame = self._findFrame self.debugger = pydevd.PyDB() try: self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort) self.debugger.prepare_to_run() from _pydevd_bundle import pydevd_tracing pydevd_tracing.SetTrace(None) except: traceback.print_exc() sys.stderr.write('Failed to connect to target debugger.\n') # Register to process commands when idle self.debugrunning = False try: import pydevconsole pydevconsole.set_debug_hook(self.debugger.process_internal_commands) except: traceback.print_exc() sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n') # Important: it has to be really enabled in the main thread, so, schedule # it to run in the main thread. self.exec_queue.put(do_connect_to_debugger) return ('connect complete',) def hello(self, input_str): # Don't care what the input string is return ("Hello eclipse",) def enableGui(self, guiname): ''' Enable the GUI specified in guiname (see inputhook for list). As with IPython, enabling multiple GUIs isn't an error, but only the last one's main loop runs and it may not work ''' def do_enable_gui(): from _pydev_bundle.pydev_versioncheck import versionok_for_gui if versionok_for_gui(): try: from pydev_ipython.inputhook import enable_gui enable_gui(guiname) except: sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname) traceback.print_exc() elif guiname not in ['none', '', None]: # Only print a warning if the guiname was going to do something sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname) # Return value does not matter, so return back what was sent return guiname # Important: it has to be really enabled in the main thread, so, schedule # it to run in the main thread. self.exec_queue.put(do_enable_gui) #======================================================================================================================= # FakeFrame #======================================================================================================================= class FakeFrame: ''' Used to show console with variables connection. A class to be used as a mock of a frame. '''
[ "from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec\n", "import sys\n", "from _pydevd_bundle.pydevd_constants import IS_JYTHON\n", "from _pydev_imps import _pydev_thread as thread\n", "from _pydevd_bundle import pydevd_xml\n", "from _pydevd_bundle import pydevd_vars\n", "from _pydevd_bundle.pydevd_utils import * # @UnusedWildImport\n", "import traceback\n", "\n", "#=======================================================================================================================\n", "# Null\n", "#=======================================================================================================================\n", "class Null:\n", " \"\"\"\n", " Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205\n", " \"\"\"\n", "\n", " def __init__(self, *args, **kwargs):\n", " return None\n", "\n", " def __call__(self, *args, **kwargs):\n", " return self\n", "\n", " def __getattr__(self, mname):\n", " return self\n", "\n", " def __setattr__(self, name, value):\n", " return self\n", "\n", " def __delattr__(self, name):\n", " return self\n", "\n", " def __repr__(self):\n", " return \"<Null>\"\n", "\n", " def __str__(self):\n", " return \"Null\"\n", "\n", " def __len__(self):\n", " return 0\n", "\n", " def __getitem__(self):\n", " return self\n", "\n", " def __setitem__(self, *args, **kwargs):\n", " pass\n", "\n", " def write(self, *args, **kwargs):\n", " pass\n", "\n", " def __nonzero__(self):\n", " return 0\n", "\n", "\n", "#=======================================================================================================================\n", "# BaseStdIn\n", "#=======================================================================================================================\n", "class BaseStdIn:\n", " def __init__(self, *args, **kwargs):\n", " try:\n", " self.encoding = sys.stdin.encoding\n", " except:\n", " #Not sure if it's available in all Python versions...\n", " pass\n", "\n", " def readline(self, *args, **kwargs):\n", " #sys.stderr.write('Cannot readline out of the console evaluation\\n') -- don't show anything\n", " #This could happen if the user had done input('enter number).<-- upon entering this, that message would appear,\n", " #which is not something we want.\n", " return '\\n'\n", "\n", " def isatty(self):\n", " return False #not really a file\n", "\n", " def write(self, *args, **kwargs):\n", " pass #not available StdIn (but it can be expected to be in the stream interface)\n", "\n", " def flush(self, *args, **kwargs):\n", " pass #not available StdIn (but it can be expected to be in the stream interface)\n", "\n", " def read(self, *args, **kwargs):\n", " #in the interactive interpreter, a read and a readline are the same.\n", " return self.readline()\n", "\n", " def close(self, *args, **kwargs):\n", " pass #expected in StdIn\n", "\n", "\n", "#=======================================================================================================================\n", "# StdIn\n", "#=======================================================================================================================\n", "class StdIn(BaseStdIn):\n", " '''\n", " Object to be added to stdin (to emulate it as non-blocking while the next line arrives)\n", " '''\n", "\n", " def __init__(self, interpreter, host, client_port):\n", " BaseStdIn.__init__(self)\n", " self.interpreter = interpreter\n", " self.client_port = client_port\n", " self.host = host\n", "\n", " def readline(self, *args, **kwargs):\n", " #Ok, callback into the client to get the new input\n", " 
try:\n", " server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))\n", " requested_input = server.RequestInput()\n", " if not requested_input:\n", " return '\\n' #Yes, a readline must return something (otherwise we can get an EOFError on the input() call).\n", " return requested_input\n", " except:\n", " return '\\n'\n", "\n", "\n", "class CodeFragment:\n", " def __init__(self, text, is_single_line=True):\n", " self.text = text\n", " self.is_single_line = is_single_line\n", "\n", " def append(self, code_fragment):\n", " self.text = self.text + \"\\n\" + code_fragment.text\n", " if not code_fragment.is_single_line:\n", " self.is_single_line = False\n", "\n", "#=======================================================================================================================\n", "# BaseInterpreterInterface\n", "#=======================================================================================================================\n", "class BaseInterpreterInterface:\n", " def __init__(self, mainThread):\n", " self.mainThread = mainThread\n", " self.interruptable = False\n", " self.exec_queue = _queue.Queue(0)\n", " self.buffer = None\n", "\n", " def need_more_for_code(self, source):\n", " # PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations\n", "\n", " # Strangely even the IPython console is_complete said it was complete\n", " # even with a continuation char at the end.\n", " if source.endswith('\\\\'):\n", " return True\n", "\n", " if hasattr(self.interpreter, 'is_complete'):\n", " return not self.interpreter.is_complete(source)\n", " try:\n", " code = self.interpreter.compile(source, '<input>', 'exec')\n", " except (OverflowError, SyntaxError, ValueError):\n", " # Case 1\n", " return False\n", " if code is None:\n", " # Case 2\n", " return True\n", "\n", " # Case 3\n", " return False\n", "\n", " def need_more(self, code_fragment):\n", " if self.buffer is None:\n", " self.buffer = code_fragment\n", " else:\n", " self.buffer.append(code_fragment)\n", "\n", " return self.need_more_for_code(self.buffer.text)\n", "\n", " def create_std_in(self):\n", " return StdIn(self, self.host, self.client_port)\n", "\n", " def add_exec(self, code_fragment):\n", " original_in = sys.stdin\n", " try:\n", " help = None\n", " if 'pydoc' in sys.modules:\n", " pydoc = sys.modules['pydoc'] #Don't import it if it still is not there.\n", "\n", " if hasattr(pydoc, 'help'):\n", " #You never know how will the API be changed, so, let's code defensively here\n", " help = pydoc.help\n", " if not hasattr(help, 'input'):\n", " help = None\n", " except:\n", " #Just ignore any error here\n", " pass\n", "\n", " more = False\n", " try:\n", " sys.stdin = self.create_std_in()\n", " try:\n", " if help is not None:\n", " #This will enable the help() function to work.\n", " try:\n", " try:\n", " help.input = sys.stdin\n", " except AttributeError:\n", " help._input = sys.stdin\n", " except:\n", " help = None\n", " if not self._input_error_printed:\n", " self._input_error_printed = True\n", " sys.stderr.write('\\nError when trying to update pydoc.help.input\\n')\n", " sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\\n\\n')\n", " traceback.print_exc()\n", "\n", " try:\n", " self.start_exec()\n", " if hasattr(self, 'debugger'):\n", " from _pydevd_bundle import pydevd_tracing\n", " pydevd_tracing.SetTrace(self.debugger.trace_dispatch)\n", "\n", " more = self.do_add_exec(code_fragment)\n", "\n", " if hasattr(self, 'debugger'):\n", " from _pydevd_bundle 
import pydevd_tracing\n", " pydevd_tracing.SetTrace(None)\n", "\n", " self.finish_exec(more)\n", " finally:\n", " if help is not None:\n", " try:\n", " try:\n", " help.input = original_in\n", " except AttributeError:\n", " help._input = original_in\n", " except:\n", " pass\n", "\n", " finally:\n", " sys.stdin = original_in\n", " except SystemExit:\n", " raise\n", " except:\n", " traceback.print_exc()\n", "\n", " return more\n", "\n", "\n", " def do_add_exec(self, codeFragment):\n", " '''\n", " Subclasses should override.\n", "\n", " @return: more (True if more input is needed to complete the statement and False if the statement is complete).\n", " '''\n", " raise NotImplementedError()\n", "\n", "\n", " def get_namespace(self):\n", " '''\n", " Subclasses should override.\n", "\n", " @return: dict with namespace.\n", " '''\n", " raise NotImplementedError()\n", "\n", "\n", " def getDescription(self, text):\n", " try:\n", " obj = None\n", " if '.' not in text:\n", " try:\n", " obj = self.get_namespace()[text]\n", " except KeyError:\n", " return ''\n", "\n", " else:\n", " try:\n", " splitted = text.split('.')\n", " obj = self.get_namespace()[splitted[0]]\n", " for t in splitted[1:]:\n", " obj = getattr(obj, t)\n", " except:\n", " return ''\n", "\n", " if obj is not None:\n", " try:\n", " if sys.platform.startswith(\"java\"):\n", " #Jython\n", " doc = obj.__doc__\n", " if doc is not None:\n", " return doc\n", "\n", " from _pydev_bundle import _pydev_jy_imports_tipper\n", "\n", " is_method, infos = _pydev_jy_imports_tipper.ismethod(obj)\n", " ret = ''\n", " if is_method:\n", " for info in infos:\n", " ret += info.get_as_doc()\n", " return ret\n", "\n", " else:\n", " #Python and Iron Python\n", " import inspect #@UnresolvedImport\n", "\n", " doc = inspect.getdoc(obj)\n", " if doc is not None:\n", " return doc\n", " except:\n", " pass\n", "\n", " try:\n", " #if no attempt succeeded, try to return repr()...\n", " return repr(obj)\n", " except:\n", " try:\n", " #otherwise the class\n", " return str(obj.__class__)\n", " except:\n", " #if all fails, go to an empty string\n", " return ''\n", " except:\n", " traceback.print_exc()\n", " return ''\n", "\n", "\n", " def do_exec_code(self, code, is_single_line):\n", " try:\n", " code_fragment = CodeFragment(code, is_single_line)\n", " more = self.need_more(code_fragment)\n", " if not more:\n", " code_fragment = self.buffer\n", " self.buffer = None\n", " self.exec_queue.put(code_fragment)\n", "\n", " return more\n", " except:\n", " traceback.print_exc()\n", " return False\n", "\n", " def execLine(self, line):\n", " return self.do_exec_code(line, True)\n", "\n", "\n", " def execMultipleLines(self, lines):\n", " if IS_JYTHON:\n", " for line in lines.split('\\n'):\n", " self.do_exec_code(line, True)\n", " else:\n", " return self.do_exec_code(lines, False)\n", "\n", "\n", " def interrupt(self):\n", " self.buffer = None # Also clear the buffer when it's interrupted.\n", " try:\n", " if self.interruptable:\n", " called = False\n", " try:\n", " # Fix for #PyDev-500: Console interrupt can't interrupt on sleep\n", " import os\n", " import signal\n", " if os.name == 'posix':\n", " # On Linux we can't interrupt 0 as in Windows because it's\n", " # actually owned by a process -- on the good side, signals\n", " # work much better on Linux!\n", " os.kill(os.getpid(), signal.SIGINT)\n", " called = True\n", "\n", " elif os.name == 'nt':\n", " # Stupid windows: sending a Ctrl+C to a process given its pid\n", " # is absurdly difficult.\n", " # There are utilities to make it 
work such as\n", " # http://www.latenighthacking.com/projects/2003/sendSignal/\n", " # but fortunately for us, it seems Python does allow a CTRL_C_EVENT\n", " # for the current process in Windows if pid 0 is passed... if we needed\n", " # to send a signal to another process the approach would be\n", " # much more difficult.\n", " # Still, note that CTRL_C_EVENT is only Python 2.7 onwards...\n", " # Also, this doesn't seem to be documented anywhere!? (stumbled\n", " # upon it by chance after digging quite a lot).\n", " os.kill(0, signal.CTRL_C_EVENT)\n", " called = True\n", " except:\n", " # Many things to go wrong (from CTRL_C_EVENT not being there\n", " # to failing import signal)... if that's the case, ask for\n", " # forgiveness and go on to the approach which will interrupt\n", " # the main thread (but it'll only work when it's executing some Python\n", " # code -- not on sleep() for instance).\n", " pass\n", "\n", " if not called:\n", " if hasattr(thread, 'interrupt_main'): #Jython doesn't have it\n", " thread.interrupt_main()\n", " else:\n", " self.mainThread._thread.interrupt() #Jython\n", " return True\n", " except:\n", " traceback.print_exc()\n", " return False\n", "\n", " def close(self):\n", " sys.exit(0)\n", "\n", " def start_exec(self):\n", " self.interruptable = True\n", "\n", " def get_server(self):\n", " if getattr(self, 'host', None) is not None:\n", " return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))\n", " else:\n", " return None\n", "\n", " server = property(get_server)\n", "\n", " def finish_exec(self, more):\n", " self.interruptable = False\n", "\n", " server = self.get_server()\n", "\n", " if server is not None:\n", " return server.NotifyFinished(more)\n", " else:\n", " return True\n", "\n", " def getFrame(self):\n", " xml = \"<xml>\"\n", " xml += pydevd_xml.frame_vars_to_xml(self.get_namespace())\n", " xml += \"</xml>\"\n", "\n", " return xml\n", "\n", " def getVariable(self, attributes):\n", " xml = \"<xml>\"\n", " valDict = pydevd_vars.resolve_var(self.get_namespace(), attributes)\n", " if valDict is None:\n", " valDict = {}\n", "\n", " keys = valDict.keys()\n", "\n", " for k in keys:\n", " xml += pydevd_vars.var_to_xml(valDict[k], to_string(k))\n", "\n", " xml += \"</xml>\"\n", "\n", " return xml\n", "\n", " def getArray(self, attr, roffset, coffset, rows, cols, format):\n", " xml = \"<xml>\"\n", " name = attr.split(\"\\t\")[-1]\n", " array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())\n", "\n", " array, metaxml, r, c, f = pydevd_vars.array_to_meta_xml(array, name, format)\n", " xml += metaxml\n", " format = '%' + f\n", " if rows == -1 and cols == -1:\n", " rows = r\n", " cols = c\n", " xml += pydevd_vars.array_to_xml(array, roffset, coffset, rows, cols, format)\n", " xml += \"</xml>\"\n", "\n", " return xml\n", "\n", " def evaluate(self, expression):\n", " xml = \"<xml>\"\n", " result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())\n", "\n", " xml += pydevd_vars.var_to_xml(result, expression)\n", "\n", " xml += \"</xml>\"\n", "\n", " return xml\n", "\n", " def changeVariable(self, attr, value):\n", " def do_change_variable():\n", " Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())\n", "\n", " # Important: it has to be really enabled in the main thread, so, schedule\n", " # it to run in the main thread.\n", " self.exec_queue.put(do_change_variable)\n", "\n", " def _findFrame(self, thread_id, frame_id):\n", " '''\n", " Used to show console with 
variables connection.\n", " Always return a frame where the locals map to our internal namespace.\n", " '''\n", " VIRTUAL_FRAME_ID = \"1\" # matches PyStackFrameConsole.java\n", " VIRTUAL_CONSOLE_ID = \"console_main\" # matches PyThreadConsole.java\n", " if thread_id == VIRTUAL_CONSOLE_ID and frame_id == VIRTUAL_FRAME_ID:\n", " f = FakeFrame()\n", " f.f_globals = {} #As globals=locals here, let's simply let it empty (and save a bit of network traffic).\n", " f.f_locals = self.get_namespace()\n", " return f\n", " else:\n", " return self.orig_find_frame(thread_id, frame_id)\n", "\n", " def connectToDebugger(self, debuggerPort):\n", " '''\n", " Used to show console with variables connection.\n", " Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.\n", " '''\n", " def do_connect_to_debugger():\n", " try:\n", " # Try to import the packages needed to attach the debugger\n", " import pydevd\n", " from _pydev_imps import _pydev_threading as threading\n", "\n", " except:\n", " # This happens on Jython embedded in host eclipse\n", " traceback.print_exc()\n", " sys.stderr.write('pydevd is not available, cannot connect\\n',)\n", "\n", " from _pydev_bundle import pydev_localhost\n", " threading.currentThread().__pydevd_id__ = \"console_main\"\n", "\n", " self.orig_find_frame = pydevd_vars.find_frame\n", " pydevd_vars.find_frame = self._findFrame\n", "\n", " self.debugger = pydevd.PyDB()\n", " try:\n", " self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)\n", " self.debugger.prepare_to_run()\n", " from _pydevd_bundle import pydevd_tracing\n", " pydevd_tracing.SetTrace(None)\n", " except:\n", " traceback.print_exc()\n", " sys.stderr.write('Failed to connect to target debugger.\\n')\n", "\n", " # Register to process commands when idle\n", " self.debugrunning = False\n", " try:\n", " import pydevconsole\n", " pydevconsole.set_debug_hook(self.debugger.process_internal_commands)\n", " except:\n", " traceback.print_exc()\n", " sys.stderr.write('Version of Python does not support debuggable Interactive Console.\\n')\n", "\n", " # Important: it has to be really enabled in the main thread, so, schedule\n", " # it to run in the main thread.\n", " self.exec_queue.put(do_connect_to_debugger)\n", "\n", " return ('connect complete',)\n", "\n", " def hello(self, input_str):\n", " # Don't care what the input string is\n", " return (\"Hello eclipse\",)\n", "\n", " def enableGui(self, guiname):\n", " ''' Enable the GUI specified in guiname (see inputhook for list).\n", " As with IPython, enabling multiple GUIs isn't an error, but\n", " only the last one's main loop runs and it may not work\n", " '''\n", " def do_enable_gui():\n", " from _pydev_bundle.pydev_versioncheck import versionok_for_gui\n", " if versionok_for_gui():\n", " try:\n", " from pydev_ipython.inputhook import enable_gui\n", " enable_gui(guiname)\n", " except:\n", " sys.stderr.write(\"Failed to enable GUI event loop integration for '%s'\\n\" % guiname)\n", " traceback.print_exc()\n", " elif guiname not in ['none', '', None]:\n", " # Only print a warning if the guiname was going to do something\n", " sys.stderr.write(\"PyDev console: Python version does not support GUI event loop integration for '%s'\\n\" % guiname)\n", " # Return value does not matter, so return back what was sent\n", " return guiname\n", "\n", " # Important: it has to be really enabled in the main thread, so, schedule\n", " # it to run in the main thread.\n", " self.exec_queue.put(do_enable_gui)\n", "\n", 
"#=======================================================================================================================\n", "# FakeFrame\n", "#=======================================================================================================================\n", "class FakeFrame:\n", " '''\n", " Used to show console with variables connection.\n", " A class to be used as a mock of a frame.\n", " '''" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01652892561983471, 0, 0.01652892561983471, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01652892561983471, 0, 0.01652892561983471, 0, 0, 0, 0, 0.0625, 0.015151515151515152, 0, 0, 0, 0.02, 0.016666666666666666, 0.024390243902439025, 0, 0, 0, 0.05, 0, 0, 0.033707865168539325, 0, 0, 0.033707865168539325, 0, 0, 0.012987012987012988, 0, 0, 0, 0.0625, 0, 0, 0.01652892561983471, 0, 0.01652892561983471, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01694915254237288, 0, 0.011627906976744186, 0, 0, 0.024390243902439025, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01652892561983471, 0, 0.01652892561983471, 0.03125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03409090909090909, 0, 0, 0.020618556701030927, 0, 0, 0, 0.0625, 0.025, 0, 0, 0, 0, 0, 0, 0, 0.014925373134328358, 0, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0.010309278350515464, 0.007692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0.008403361344537815, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0.020833333333333332, 0.034482758620689655, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0.015151515151515152, 0, 0.05, 0, 0.024390243902439025, 0, 0.041666666666666664, 0.017543859649122806, 0, 0.0625, 0, 0, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0.025, 0, 0, 0, 0, 0, 0, 0, 0.04, 0.013513513513513514, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0.012048192771084338, 0.012048192771084338, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0.010869565217391304, 0.010416666666666666, 0.011904761904761904, 0, 0.011627906976744186, 0.011363636363636364, 0, 0, 0, 0.041666666666666664, 0.012345679012345678, 0, 0.012345679012345678, 0.01098901098901099, 0, 0, 0, 0, 0.036585365853658534, 0, 0, 0.029411764705882353, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0.015151515151515152, 0.013333333333333334, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0.05, 0, 0.009523809523809525, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0.009523809523809525, 0, 0, 0, 0.007633587786259542, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0.01652892561983471, 0, 0.01652892561983471, 0.058823529411764705, 0, 0, 0, 0.2857142857142857 ]
565
0.004877
#!/usr/bin/env python
#
# Copyright 2007 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

def run(statement, filename=None, sort=-1):
    import os, tempfile, hotshot, hotshot.stats
    logfd, logfn = tempfile.mkstemp()
    prof = hotshot.Profile(logfn)
    try:
        prof = prof.run(statement)
    except SystemExit:
        pass
    try:
        try:
            prof = prof.run(statement)
        except SystemExit:
            pass
        prof.close()
    finally:
        stats = hotshot.stats.load(logfn)
        stats.strip_dirs()
        stats.sort_stats(sort)
        if filename is not None:
            result = stats.dump_stats(filename)
        else:
            result = stats.print_stats()
        os.unlink(logfn)
    return result

def main():
    import os, sys
    from optparse import OptionParser
    usage = "hotshotmain.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if (len(sys.argv) > 0):
        sys.path.insert(0, os.path.dirname(sys.argv[0]))
        run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser

if __name__ == "__main__":
    main()
[ "#!/usr/bin/env python\n", "#\n", "# Copyright 2007 Jose Fonseca\n", "#\n", "# This program is free software: you can redistribute it and/or modify it\n", "# under the terms of the GNU Lesser General Public License as published\n", "# by the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# This program is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU Lesser General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU Lesser General Public License\n", "# along with this program. If not, see <http://www.gnu.org/licenses/>.\n", "#\n", "\n", "def run(statement, filename=None, sort=-1):\n", " import os, tempfile, hotshot, hotshot.stats\n", " logfd, logfn = tempfile.mkstemp()\n", " prof = hotshot.Profile(logfn)\n", " try:\n", " prof = prof.run(statement)\n", " except SystemExit:\n", " pass\n", " try:\n", " try:\n", " prof = prof.run(statement)\n", " except SystemExit:\n", " pass\n", " prof.close()\n", " finally:\n", " stats = hotshot.stats.load(logfn)\n", " stats.strip_dirs()\n", " stats.sort_stats(sort)\n", " if filename is not None:\n", " result = stats.dump_stats(filename)\n", " else:\n", " result = stats.print_stats()\n", " os.unlink(logfn)\n", " return result\n", "\n", "def main():\n", " import os, sys\n", " from optparse import OptionParser\n", " usage = \"hotshotmain.py [-o output_file_path] [-s sort] scriptfile [arg] ...\"\n", " parser = OptionParser(usage=usage)\n", " parser.allow_interspersed_args = False\n", " parser.add_option('-o', '--outfile', dest=\"outfile\",\n", " help=\"Save stats to <outfile>\", default=None)\n", " parser.add_option('-s', '--sort', dest=\"sort\",\n", " help=\"Sort order when printing to stdout, based on pstats.Stats class\", default=-1)\n", "\n", " if not sys.argv[1:]:\n", " parser.print_usage()\n", " sys.exit(2)\n", "\n", " (options, args) = parser.parse_args()\n", " sys.argv[:] = args\n", "\n", " if (len(sys.argv) > 0):\n", " sys.path.insert(0, os.path.dirname(sys.argv[0]))\n", " run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)\n", " else:\n", " parser.print_usage()\n", " return parser\n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.020833333333333332, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0.05263157894736842, 0, 0.012195121951219513, 0, 0, 0, 0.018518518518518517, 0, 0.021739130434782608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0 ]
70
0.003518
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

#
"""
Implements the old SSHv1 key agent protocol.

This module is unstable.

Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>}
"""

import struct
from common import NS, getNS
from twisted.conch.error import ConchError
from twisted.internet import defer, protocol

class SSHAgentClient(protocol.Protocol):

    def __init__(self):
        self.buf = ''
        self.deferreds = []

    def dataReceived(self, data):
        self.buf += data
        while 1:
            if len(self.buf) <= 4: return
            packLen = struct.unpack('!L', self.buf[:4])[0]
            if len(self.buf) < 4+packLen: return
            packet, self.buf = self.buf[4:4+packLen], self.buf[4+packLen:]
            reqType = ord(packet[0])
            d = self.deferreds.pop(0)
            if reqType == AGENT_FAILURE:
                d.errback(ConchError('agent failure'))
            elif reqType == AGENT_SUCCESS:
                d.callback('')
            else:
                d.callback(packet)

    def sendRequest(self, reqType, data):
        pack = struct.pack('!LB',len(data)+1, reqType)+data
        self.transport.write(pack)
        d = defer.Deferred()
        self.deferreds.append(d)
        return d

    def requestIdentities(self):
        return self.sendRequest(AGENTC_REQUEST_IDENTITIES, '').addCallback(self._cbRequestIdentities)

    def _cbRequestIdentities(self, data):
        if ord(data[0]) != AGENT_IDENTITIES_ANSWER:
            return ConchError('unexpected respone: %i' % ord(data[0]))
        numKeys = struct.unpack('!L', data[1:5])[0]
        keys = []
        data = data[5:]
        for i in range(numKeys):
            blobLen = struct.unpack('!L', data[:4])[0]
            blob, data = data[4:4+blobLen], data[4+blobLen:]
            commLen = struct.unpack('!L', data[:4])[0]
            comm, data = data[4:4+commLen], data[4+commLen:]
            keys.append((blob, comm))
        return keys

    def addIdentity(self, blob, comment = ''):
        req = blob
        req += NS(comment)
        co
        return self.sendRequest(AGENTC_ADD_IDENTITY, req)

    def signData(self, blob, data):
        req = NS(blob)
        req += NS(data)
        req += '\000\000\000\000' # flags
        return self.sendRequest(AGENTC_SIGN_REQUEST, req).addCallback(self._cbSignData)

    def _cbSignData(self, data):
        if data[0] != chr(AGENT_SIGN_RESPONSE):
            return ConchError('unexpected data: %i' % ord(data[0]))
        signature = getNS(data[1:])[0]
        return signature

    def removeIdentity(self, blob):
        req = NS(blob)
        return self.sendRequest(AGENTC_REMOVE_IDENTITY, req)

    def removeAllIdentities(self):
        return self.sendRequest(AGENTC_REMOVE_ALL_IDENTITIES, '')

class SSHAgentServer(protocol.Protocol):

    def __init__(self):
        self.buf = ''

    def dataReceived(self, data):
        self.buf += data
        while 1:
            if len(self.buf) <= 4: return
            packLen = struct.unpack('!L', self.buf[:4])[0]
            if len(self.buf) < 4+packLen: return
            packet, self.buf = self.buf[4:4+packLen], self.buf[4+packLen:]
            reqType = ord(packet[0])
            reqName = messages.get(reqType, None)
            if not reqName:
                print 'bad request', reqType
            f = getattr(self, 'agentc_%s' % reqName)
            f(packet[1:])

    def sendResponse(self, reqType, data):
        pack = struct.pack('!LB', len(data)+1, reqType) + data
        self.transport.write(pack)

    def agentc_REQUEST_IDENTITIES(self, data):
        assert data == ''
        numKeys = len(self.keys)
        s = struct.pack('!L', numKeys)
        for k in self.keys:
            s += struct.pack('!L', len(k)) + k
            s += struct.pack('!L', len(self.keys[k][1])) + self.keys[k][1]
        self.sendResponse(AGENT_IDENTITIES_ANSWER, s)

    def agentc_SIGN_REQUEST(self, data):
        blob, data = common.getNS(data)
        if blob not in self.keys:
            return self.sendResponse(AGENT_FAILURE, '')
        signData, data = common.getNS(data)
        assert data == '\000\000\000\000'
        self.sendResponse(AGENT_SIGN_RESPONSE, common.NS(keys.signData(self.keys[blob][0], signData)))

    def agentc_ADD_IDENTITY(self, data): pass
    def agentc_REMOVE_IDENTITY(self, data): pass
    def agentc_REMOVE_ALL_IDENTITIES(self, data): pass

AGENT_FAILURE = 5
AGENT_SUCCESS = 6
AGENTC_REQUEST_IDENTITIES = 11
AGENT_IDENTITIES_ANSWER = 12
AGENTC_SIGN_REQUEST = 13
AGENT_SIGN_RESPONSE = 14
AGENTC_ADD_IDENTITY = 17
AGENTC_REMOVE_IDENTITY = 18
AGENTC_REMOVE_ALL_IDENTITIES = 19

messages = {}
import agent
for v in dir(agent):
    if v.startswith('AGENTC_'):
        messages[getattr(agent, v)] = v[7:]
[ "# Copyright (c) 2001-2004 Twisted Matrix Laboratories.\n", "# See LICENSE for details.\n", "\n", "#\n", "\"\"\"\n", "Implements the old SSHv1 key agent protocol.\n", "\n", "This module is unstable.\n", "\n", "Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>}\n", "\"\"\"\n", "\n", "import struct\n", "from common import NS, getNS\n", "from twisted.conch.error import ConchError\n", "from twisted.internet import defer, protocol\n", "\n", "class SSHAgentClient(protocol.Protocol):\n", " \n", " def __init__(self):\n", " self.buf = ''\n", " self.deferreds = []\n", "\n", " def dataReceived(self, data):\n", " self.buf += data\n", " while 1:\n", " if len(self.buf) <= 4: return\n", " packLen = struct.unpack('!L', self.buf[:4])[0]\n", " if len(self.buf) < 4+packLen: return\n", " packet, self.buf = self.buf[4:4+packLen], self.buf[4+packLen:]\n", " reqType = ord(packet[0])\n", " d = self.deferreds.pop(0)\n", " if reqType == AGENT_FAILURE:\n", " d.errback(ConchError('agent failure'))\n", " elif reqType == AGENT_SUCCESS:\n", " d.callback('')\n", " else:\n", " d.callback(packet)\n", "\n", " def sendRequest(self, reqType, data):\n", " pack = struct.pack('!LB',len(data)+1, reqType)+data\n", " self.transport.write(pack)\n", " d = defer.Deferred()\n", " self.deferreds.append(d)\n", " return d\n", "\n", " def requestIdentities(self):\n", " return self.sendRequest(AGENTC_REQUEST_IDENTITIES, '').addCallback(self._cbRequestIdentities)\n", "\n", " def _cbRequestIdentities(self, data):\n", " if ord(data[0]) != AGENT_IDENTITIES_ANSWER:\n", " return ConchError('unexpected respone: %i' % ord(data[0]))\n", " numKeys = struct.unpack('!L', data[1:5])[0]\n", " keys = []\n", " data = data[5:]\n", " for i in range(numKeys):\n", " blobLen = struct.unpack('!L', data[:4])[0]\n", " blob, data = data[4:4+blobLen], data[4+blobLen:]\n", " commLen = struct.unpack('!L', data[:4])[0]\n", " comm, data = data[4:4+commLen], data[4+commLen:]\n", " keys.append((blob, comm))\n", " return keys\n", "\n", " def addIdentity(self, blob, comment = ''):\n", " req = blob\n", " req += NS(comment)\n", " co\n", " return self.sendRequest(AGENTC_ADD_IDENTITY, req)\n", "\n", " def signData(self, blob, data):\n", " req = NS(blob)\n", " req += NS(data)\n", " req += '\\000\\000\\000\\000' # flags\n", " return self.sendRequest(AGENTC_SIGN_REQUEST, req).addCallback(self._cbSignData)\n", "\n", " def _cbSignData(self, data):\n", " if data[0] != chr(AGENT_SIGN_RESPONSE):\n", " return ConchError('unexpected data: %i' % ord(data[0]))\n", " signature = getNS(data[1:])[0]\n", " return signature\n", "\n", " def removeIdentity(self, blob):\n", " req = NS(blob)\n", " return self.sendRequest(AGENTC_REMOVE_IDENTITY, req)\n", "\n", " def removeAllIdentities(self):\n", " return self.sendRequest(AGENTC_REMOVE_ALL_IDENTITIES, '')\n", "\n", "class SSHAgentServer(protocol.Protocol):\n", "\n", " def __init__(self):\n", " self.buf = '' \n", "\n", " def dataReceived(self, data):\n", " self.buf += data\n", " while 1:\n", " if len(self.buf) <= 4: return\n", " packLen = struct.unpack('!L', self.buf[:4])[0]\n", " if len(self.buf) < 4+packLen: return\n", " packet, self.buf = self.buf[4:4+packLen], self.buf[4+packLen:]\n", " reqType = ord(packet[0])\n", " reqName = messages.get(reqType, None)\n", " if not reqName:\n", " print 'bad request', reqType\n", " f = getattr(self, 'agentc_%s' % reqName)\n", " f(packet[1:])\n", "\n", " def sendResponse(self, reqType, data):\n", " pack = struct.pack('!LB', len(data)+1, reqType) + data\n", " self.transport.write(pack)\n", "\n", " def 
agentc_REQUEST_IDENTITIES(self, data):\n", " assert data == ''\n", " numKeys = len(self.keys)\n", " s = struct.pack('!L', numKeys)\n", " for k in self.keys:\n", " s += struct.pack('!L', len(k)) + k\n", " s += struct.pack('!L', len(self.keys[k][1])) + self.keys[k][1]\n", " self.sendResponse(AGENT_IDENTITIES_ANSWER, s)\n", "\n", " def agentc_SIGN_REQUEST(self, data):\n", " blob, data = common.getNS(data)\n", " if blob not in self.keys:\n", " return self.sendResponse(AGENT_FAILURE, '')\n", " signData, data = common.getNS(data)\n", " assert data == '\\000\\000\\000\\000'\n", " self.sendResponse(AGENT_SIGN_RESPONSE, common.NS(keys.signData(self.keys[blob][0], signData)))\n", "\n", " def agentc_ADD_IDENTITY(self, data): pass\n", " def agentc_REMOVE_IDENTITY(self, data): pass\n", " def agentc_REMOVE_ALL_IDENTITIES(self, data): pass\n", "\n", "AGENT_FAILURE = 5\n", "AGENT_SUCCESS = 6\n", "AGENTC_REQUEST_IDENTITIES = 11\n", "AGENT_IDENTITIES_ANSWER = 12\n", "AGENTC_SIGN_REQUEST = 13\n", "AGENT_SIGN_RESPONSE = 14\n", "AGENTC_ADD_IDENTITY = 17\n", "AGENTC_REMOVE_IDENTITY = 18\n", "AGENTC_REMOVE_ALL_IDENTITIES = 19\n", "\n", "messages = {}\n", "import agent\n", "for v in dir(agent):\n", " if v.startswith('AGENTC_'):\n", " messages[getattr(agent, v)] = v[7:]\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.016666666666666666, 0, 0, 0, 0, 0, 0, 0.00980392156862745, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0425531914893617, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0.023809523809523808, 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0, 0, 0.05555555555555555, 0.027777777777777776, 0.02702702702702703, 0.02702702702702703, 0.02702702702702703, 0.02702702702702703, 0.02702702702702703, 0.02702702702702703, 0.02702702702702703, 0, 0, 0.07692307692307693, 0, 0, 0 ]
147
0.005742
#!/usr/bin/env python # -*- coding: utf-8 -*- # # complexity documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) import ftpext # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'ftpext' copyright = u'2014, Kalle Lindqvist' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = ftpext.__version__ # The full version, including alpha/beta/rc tags. release = ftpext.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ftpextdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'ftpext.tex', u'ftpext Documentation', u'Kalle Lindqvist', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. 
#latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ftpext', u'ftpext Documentation', [u'Kalle Lindqvist'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ftpext', u'ftpext Documentation', u'Kalle Lindqvist', 'ftpext', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "#\n", "# complexity documentation build configuration file, created by\n", "# sphinx-quickstart on Tue Jul 9 22:26:36 2013.\n", "#\n", "# This file is execfile()d with the current directory set to its containing dir.\n", "#\n", "# Note that not all possible configuration values are present in this\n", "# autogenerated file.\n", "#\n", "# All configuration values have a default; values that are commented out\n", "# serve to show the default.\n", "\n", "\n", "import sys, os\n", "# If extensions (or modules to document with autodoc) are in another directory,\n", "# add these directories to sys.path here. If the directory is relative to the\n", "# documentation root, use os.path.abspath to make it absolute, like shown here.\n", "#sys.path.insert(0, os.path.abspath('.'))\n", "\n", "# Get the project root dir, which is the parent dir of this\n", "cwd = os.getcwd()\n", "project_root = os.path.dirname(cwd)\n", "# Insert the project root dir as the first element in the PYTHONPATH.\n", "# This lets us ensure that the source package is imported, and that its\n", "# version is used.\n", "sys.path.insert(0, project_root)\n", "\n", "import ftpext\n", "\n", "# -- General configuration -----------------------------------------------------\n", "\n", "# If your documentation needs a minimal Sphinx version, state it here.\n", "#needs_sphinx = '1.0'\n", "\n", "# Add any Sphinx extension module names here, as strings. They can be extensions\n", "# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n", "extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']\n", "\n", "# Add any paths that contain templates here, relative to this directory.\n", "templates_path = ['_templates']\n", "\n", "# The suffix of source filenames.\n", "source_suffix = '.rst'\n", "\n", "# The encoding of source files.\n", "#source_encoding = 'utf-8-sig'\n", "\n", "# The master toctree document.\n", "master_doc = 'index'\n", "\n", "# General information about the project.\n", "project = u'ftpext'\n", "copyright = u'2014, Kalle Lindqvist'\n", "\n", "# The version info for the project you're documenting, acts as replacement for\n", "# |version| and |release|, also used in various other places throughout the\n", "# built documents.\n", "#\n", "# The short X.Y version.\n", "version = ftpext.__version__\n", "# The full version, including alpha/beta/rc tags.\n", "release = ftpext.__version__\n", "\n", "# The language for content autogenerated by Sphinx. Refer to documentation\n", "# for a list of supported languages.\n", "#language = None\n", "\n", "# There are two options for replacing |today|: either, you set today to some\n", "# non-false value, then it is used:\n", "#today = ''\n", "# Else, today_fmt is used as the format for a strftime call.\n", "#today_fmt = '%B %d, %Y'\n", "\n", "# List of patterns, relative to source directory, that match files and\n", "# directories to ignore when looking for source files.\n", "exclude_patterns = ['_build']\n", "\n", "# The reST default role (used for this markup: `text`) to use for all documents.\n", "#default_role = None\n", "\n", "# If true, '()' will be appended to :func: etc. cross-reference text.\n", "#add_function_parentheses = True\n", "\n", "# If true, the current module name will be prepended to all description\n", "# unit titles (such as .. function::).\n", "#add_module_names = True\n", "\n", "# If true, sectionauthor and moduleauthor directives will be shown in the\n", "# output. 
They are ignored by default.\n", "#show_authors = False\n", "\n", "# The name of the Pygments (syntax highlighting) style to use.\n", "pygments_style = 'sphinx'\n", "\n", "# A list of ignored prefixes for module index sorting.\n", "#modindex_common_prefix = []\n", "\n", "# If true, keep warnings as \"system message\" paragraphs in the built documents.\n", "#keep_warnings = False\n", "\n", "\n", "# -- Options for HTML output ---------------------------------------------------\n", "\n", "# The theme to use for HTML and HTML Help pages. See the documentation for\n", "# a list of builtin themes.\n", "html_theme = 'default'\n", "\n", "# Theme options are theme-specific and customize the look and feel of a theme\n", "# further. For a list of options available for each theme, see the\n", "# documentation.\n", "#html_theme_options = {}\n", "\n", "# Add any paths that contain custom themes here, relative to this directory.\n", "#html_theme_path = []\n", "\n", "# The name for this set of Sphinx documents. If None, it defaults to\n", "# \"<project> v<release> documentation\".\n", "#html_title = None\n", "\n", "# A shorter title for the navigation bar. Default is the same as html_title.\n", "#html_short_title = None\n", "\n", "# The name of an image file (relative to this directory) to place at the top\n", "# of the sidebar.\n", "#html_logo = None\n", "\n", "# The name of an image file (within the static path) to use as favicon of the\n", "# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n", "# pixels large.\n", "#html_favicon = None\n", "\n", "# Add any paths that contain custom static files (such as style sheets) here,\n", "# relative to this directory. They are copied after the builtin static files,\n", "# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n", "html_static_path = ['_static']\n", "\n", "# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n", "# using the given strftime format.\n", "#html_last_updated_fmt = '%b %d, %Y'\n", "\n", "# If true, SmartyPants will be used to convert quotes and dashes to\n", "# typographically correct entities.\n", "#html_use_smartypants = True\n", "\n", "# Custom sidebar templates, maps document names to template names.\n", "#html_sidebars = {}\n", "\n", "# Additional templates that should be rendered to pages, maps page names to\n", "# template names.\n", "#html_additional_pages = {}\n", "\n", "# If false, no module index is generated.\n", "#html_domain_indices = True\n", "\n", "# If false, no index is generated.\n", "#html_use_index = True\n", "\n", "# If true, the index is split into individual pages for each letter.\n", "#html_split_index = False\n", "\n", "# If true, links to the reST sources are added to the pages.\n", "#html_show_sourcelink = True\n", "\n", "# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n", "#html_show_sphinx = True\n", "\n", "# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n", "#html_show_copyright = True\n", "\n", "# If true, an OpenSearch description file will be output, and all pages will\n", "# contain a <link> tag referring to it. The value of this option must be the\n", "# base URL from which the finished HTML is served.\n", "#html_use_opensearch = ''\n", "\n", "# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n", "#html_file_suffix = None\n", "\n", "# Output file base name for HTML help builder.\n", "htmlhelp_basename = 'ftpextdoc'\n", "\n", "\n", "# -- Options for LaTeX output --------------------------------------------------\n", "\n", "latex_elements = {\n", "# The paper size ('letterpaper' or 'a4paper').\n", "#'papersize': 'letterpaper',\n", "\n", "# The font size ('10pt', '11pt' or '12pt').\n", "#'pointsize': '10pt',\n", "\n", "# Additional stuff for the LaTeX preamble.\n", "#'preamble': '',\n", "}\n", "\n", "# Grouping the document tree into LaTeX files. List of tuples\n", "# (source start file, target name, title, author, documentclass [howto/manual]).\n", "latex_documents = [\n", " ('index', 'ftpext.tex', u'ftpext Documentation',\n", " u'Kalle Lindqvist', 'manual'),\n", "]\n", "\n", "# The name of an image file (relative to this directory) to place at the top of\n", "# the title page.\n", "#latex_logo = None\n", "\n", "# For \"manual\" documents, if this is true, then toplevel headings are parts,\n", "# not chapters.\n", "#latex_use_parts = False\n", "\n", "# If true, show page references after internal links.\n", "#latex_show_pagerefs = False\n", "\n", "# If true, show URL addresses after external links.\n", "#latex_show_urls = False\n", "\n", "# Documents to append as an appendix to all manuals.\n", "#latex_appendices = []\n", "\n", "# If false, no module index is generated.\n", "#latex_domain_indices = True\n", "\n", "\n", "# -- Options for manual page output --------------------------------------------\n", "\n", "# One entry per manual page. List of tuples\n", "# (source start file, name, description, authors, manual section).\n", "man_pages = [\n", " ('index', 'ftpext', u'ftpext Documentation',\n", " [u'Kalle Lindqvist'], 1)\n", "]\n", "\n", "# If true, show URL addresses after external links.\n", "#man_show_urls = False\n", "\n", "\n", "# -- Options for Texinfo output ------------------------------------------------\n", "\n", "# Grouping the document tree into Texinfo files. List of tuples\n", "# (source start file, target name, title, author,\n", "# dir menu entry, description, category)\n", "texinfo_documents = [\n", " ('index', 'ftpext', u'ftpext Documentation',\n", " u'Kalle Lindqvist', 'ftpext', 'One line description of project.',\n", " 'Miscellaneous'),\n", "]\n", "\n", "# Documents to append as an appendix to all manuals.\n", "#texinfo_appendices = []\n", "\n", "# If false, no module index is generated.\n", "#texinfo_domain_indices = True\n", "\n", "# How to display URL addresses: 'footnote', 'no', or 'inline'.\n", "#texinfo_show_urls = 'footnote'\n", "\n", "# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n", "#texinfo_no_detailmenu = False\n" ]
[ 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0.012345679012345678, 0, 0, 0.045454545454545456, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0.08333333333333333, 0, 0.04, 0, 0, 0, 0, 0, 0.012345679012345678, 0.047619047619047616, 0, 0, 0.030303030303030304, 0, 0, 0, 0.04, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0.034482758620689655, 0, 0, 0.043478260869565216, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0.045454545454545456, 0, 0, 0, 0.05263157894736842, 0, 0, 0.04, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703, 0, 0, 0, 0.034482758620689655, 0, 0, 0.05, 0, 0, 0, 0.03571428571428571, 0, 0, 0.03571428571428571, 0, 0, 0.043478260869565216, 0, 0, 0.038461538461538464, 0, 0, 0.034482758620689655, 0, 0, 0.04, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0.04, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.02127659574468085, 0.06896551724137931, 0, 0.022727272727272728, 0.09090909090909091, 0, 0.023255813953488372, 0.11764705882352941, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0.04, 0, 0, 0.034482758620689655, 0, 0, 0.04, 0, 0, 0.043478260869565216, 0, 0, 0.034482758620689655, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0.03225806451612903, 0, 0, 0.03125, 0, 0, 0.03225806451612903 ]
259
0.008796
""" A fake backend for dummying during tests. """ from django.utils.encoding import force_unicode from haystack.backends import BaseSearchBackend, BaseSearchQuery, log_query from haystack.constants import FILTER_SEPARATOR from haystack.models import SearchResult BACKEND_NAME = 'dummy' class DummySearchResult(SearchResult): dm = type('DummyModel', (object,), {}) def _get_object(self): return self.dm() def _set_object(self, obj): pass def _get_model(self): return self.dm def _set_model(self, obj): pass def content_type(self): return u"%s.%s" % (self.app_label, self.model_name) class SearchBackend(BaseSearchBackend): def update(self, indexer, iterable, commit=True): pass def remove(self, obj, commit=True): pass def clear(self, models=[], commit=True): pass @log_query def search(self, query_string, sort_by=None, start_offset=0, end_offset=None, fields='', highlight=False, facets=None, date_facets=None, query_facets=None, narrow_queries=None, spelling_query=None, limit_to_registered_models=None, **kwargs): if query_string == '(content__exact hello AND content__exact world)': return { 'results': [DummySearchResult('haystack', 'dummymodel', 1, 1.5)], 'hits': 1, } return { 'results': [], 'hits': 0, } def prep_value(self, db_field, value): return value def more_like_this(self, model_instance, additional_query_string=None, start_offset=0, end_offset=None, limit_to_registered_models=None, **kwargs): return { 'results': [], 'hits': 0 } class SearchQuery(BaseSearchQuery): def __init__(self, site=None, backend=None): super(SearchQuery, self).__init__(backend=backend) if backend is not None: self.backend = backend else: self.backend = SearchBackend(site=site) def build_query_fragment(self, field, filter_type, value): result = '' value = force_unicode(value) # Check to see if it's a phrase for an exact match. if ' ' in value: value = '"%s"' % value index_fieldname = self.backend.site.get_index_fieldname(field) # 'content' is a special reserved word, much like 'pk' in # Django's ORM layer. It indicates 'no special field'. result = ' '.join([FILTER_SEPARATOR.join((index_fieldname, filter_type)), value]) return result
[ "\"\"\"\n", "A fake backend for dummying during tests.\n", "\"\"\"\n", "from django.utils.encoding import force_unicode\n", "from haystack.backends import BaseSearchBackend, BaseSearchQuery, log_query\n", "from haystack.constants import FILTER_SEPARATOR\n", "from haystack.models import SearchResult\n", "\n", "\n", "BACKEND_NAME = 'dummy'\n", "\n", "\n", "class DummySearchResult(SearchResult):\n", " dm = type('DummyModel', (object,), {})\n", " \n", " def _get_object(self):\n", " return self.dm()\n", " \n", " def _set_object(self, obj):\n", " pass\n", " \n", " def _get_model(self):\n", " return self.dm\n", " \n", " def _set_model(self, obj):\n", " pass\n", "\n", " def content_type(self):\n", " return u\"%s.%s\" % (self.app_label, self.model_name)\n", "\n", "\n", "class SearchBackend(BaseSearchBackend):\n", " def update(self, indexer, iterable, commit=True):\n", " pass\n", " \n", " def remove(self, obj, commit=True):\n", " pass\n", " \n", " def clear(self, models=[], commit=True):\n", " pass\n", " \n", " @log_query\n", " def search(self, query_string, sort_by=None, start_offset=0, end_offset=None,\n", " fields='', highlight=False, facets=None, date_facets=None, query_facets=None,\n", " narrow_queries=None, spelling_query=None,\n", " limit_to_registered_models=None, **kwargs):\n", " if query_string == '(content__exact hello AND content__exact world)':\n", " return {\n", " 'results': [DummySearchResult('haystack', 'dummymodel', 1, 1.5)],\n", " 'hits': 1,\n", " }\n", " \n", " return {\n", " 'results': [],\n", " 'hits': 0,\n", " }\n", " \n", " def prep_value(self, db_field, value):\n", " return value\n", " \n", " def more_like_this(self, model_instance, additional_query_string=None,\n", " start_offset=0, end_offset=None,\n", " limit_to_registered_models=None, **kwargs):\n", " return {\n", " 'results': [],\n", " 'hits': 0\n", " }\n", "\n", "\n", "class SearchQuery(BaseSearchQuery):\n", " def __init__(self, site=None, backend=None):\n", " super(SearchQuery, self).__init__(backend=backend)\n", " \n", " if backend is not None:\n", " self.backend = backend\n", " else:\n", " self.backend = SearchBackend(site=site)\n", " \n", " def build_query_fragment(self, field, filter_type, value):\n", " result = ''\n", " value = force_unicode(value)\n", " \n", " # Check to see if it's a phrase for an exact match.\n", " if ' ' in value:\n", " value = '\"%s\"' % value\n", " \n", " index_fieldname = self.backend.site.get_index_fieldname(field)\n", " \n", " # 'content' is a special reserved word, much like 'pk' in\n", " # Django's ORM layer. It indicates 'no special field'.\n", " result = ' '.join([FILTER_SEPARATOR.join((index_fieldname, filter_type)), value])\n", " return result\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0.2, 0, 0.012195121951219513, 0.010752688172043012, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0.2, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0.1111111111111111, 0, 0.1111111111111111, 0, 0, 0.011111111111111112, 0 ]
92
0.028281
# -*- coding: utf-8 -*- #!/usr/bin/env python """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdip...@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Attention: Requires Chrome or Safari. For IE of Firefox you need https://github.com/gimite/web-socket-js 1) install tornado (requires Tornado 3.0 or later) easy_install tornado 2) start this app: python gluon/contrib/websocket_messaging.py -k mykey -p 8888 3) from any web2py app you can post messages with from gluon.contrib.websocket_messaging import websocket_send websocket_send('http://127.0.0.1:8888', 'Hello World', 'mykey', 'mygroup') 4) from any template you can receive them with <script> $(document).ready(function(){ if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){alert(e.data)})) alert("html5 websocket not supported by your browser, try Google Chrome"); }); </script> When the server posts a message, all clients connected to the page will popup an alert message Or if you want to send json messages and store evaluated json in a var called data: <script> $(document).ready(function(){ var data; $.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){data=eval('('+e.data+')')}); }); </script> - All communications between web2py and websocket_messaging will be digitally signed with hmac. - All validation is handled on the web2py side and there is no need to modify websocket_messaging.py - Multiple web2py instances can talk with one or more websocket_messaging servers. - "ws://127.0.0.1:8888/realtime/" must be contain the IP of the websocket_messaging server. - Via group='mygroup' name you can support multiple groups of clients (think of many chat-rooms) Here is a complete sample web2py action: def index(): form=LOAD('default', 'ajax_form', ajax=True) script=SCRIPT(''' jQuery(document).ready(function(){ var callback=function(e){alert(e.data)}; if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', callback)) alert("html5 websocket not supported by your browser, try Google Chrome"); }); ''') return dict(form=form, script=script) def ajax_form(): form=SQLFORM.factory(Field('message')) if form.accepts(request,session): from gluon.contrib.websocket_messaging import websocket_send websocket_send( 'http://127.0.0.1:8888', form.vars.message, 'mykey', 'mygroup') return form https is possible too using 'https://127.0.0.1:8888' instead of 'http://127.0.0.1:8888', but need to be started with python gluon/contrib/websocket_messaging.py -k mykey -p 8888 -s keyfile.pem -c certfile.pem for secure websocket do: web2py_websocket('wss://127.0.0.1:8888/realtime/mygroup',callback) Acknowledgements: Tornado code inspired by http://thomas.pelletier.im/2010/08/websocket-tornado-redis/ """ import tornado.httpserver import tornado.websocket import tornado.ioloop import tornado.web import hmac import sys import optparse import urllib import time listeners, names, tokens = {}, {}, {} import logging def websocket_send(url, message, hmac_key=None, group='default'): sig = hmac_key and hmac.new(hmac_key, message).hexdigest() or '' params = urllib.urlencode( {'message': message, 'signature': sig, 'group': group}) f = urllib.urlopen(url, params) data = f.read() f.close() return data class PostHandler(tornado.web.RequestHandler): """ only authorized parties can post messages """ def post(self): logger.info('PostHandler') if hmac_key and not 'signature' in self.request.arguments: self.send_error(401) if 'message' in self.request.arguments: message = 
self.request.arguments['message'][0] group = self.request.arguments.get('group', ['default'])[0] print '%s:MESSAGE to %s:%s' % (time.time(), group, message) if hmac_key: signature = self.request.arguments['signature'][0] if not hmac.new(hmac_key, message).hexdigest() == signature: self.send_error(401) for client in listeners.get(group, []): client.write_message(message) class TokenHandler(tornado.web.RequestHandler): """ if running with -t post a token to allow a client to join using the token the message here is the token (any uuid) allows only authorized parties to joins, for example, a chat """ def post(self): logger.info('TokenHandler') if hmac_key and not 'message' in self.request.arguments: self.send_error(401) if 'message' in self.request.arguments: message = self.request.arguments['message'][0] if hmac_key: signature = self.request.arguments['signature'][0] if not hmac.new(hmac_key, message).hexdigest() == signature: self.send_error(401) tokens[message] = None class DistributeHandler(tornado.websocket.WebSocketHandler): def open(self, params): logger.info('DistributeHandler') group, token, name = params.split('/') + [None, None] self.group = group or 'default' self.token = token or 'none' self.name = name or 'anonymous' # only authorized parties can join if DistributeHandler.tokens: if not self.token in tokens or not token[self.token] is None: self.close() else: tokens[self.token] = self if not self.group in listeners: listeners[self.group] = [] # notify clients that a member has joined the groups for client in listeners.get(self.group, []): client.write_message('+' + self.name) listeners[self.group].append(self) names[self] = self.name print '%s:CONNECT to %s' % (time.time(), self.group) def on_message(self, message): pass def on_close(self): if self.group in listeners: listeners[self.group].remove(self) del names[self] # notify clients that a member has left the groups for client in listeners.get(self.group, []): client.write_message('-' + self.name) print '%s:DISCONNECT from %s' % (time.time(), self.group) def get_offline_logger(): if 'logger' not in globals(): import logging stream = logging.StreamHandler() stream.setFormatter(logging.Formatter('%(levelname)5s:%(module)15s.%(funcName)-20s:%(lineno)4d: %(message)s','%Y%m%d %H:%M:%S')) stream.setLevel(logging.DEBUG) logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger.addHandler(stream) globals()['logger']=logger return globals()['logger'] # if your webserver is different from tornado server uncomment this # or override using something more restrictive: # http://tornado.readthedocs.org/en/latest/websocket.html#tornado.websocket.WebSocketHandler.check_origin # def check_origin(self, origin): # return True if __name__ == "__main__": logger = get_offline_logger() usage = __doc__ version = "" parser = optparse.OptionParser(usage, None, optparse.Option, version) parser.add_option('-p', '--port', default='8888', dest='port', help='socket') parser.add_option('-l', '--listen', default='0.0.0.0', dest='address', help='listener address') parser.add_option('-k', '--hmac_key', default='', dest='hmac_key', help='hmac_key') parser.add_option('-t', '--tokens', action='store_true', default=False, dest='tokens', help='require tockens to join') parser.add_option('-s', '--sslkey', default=False, dest='keyfile', help='require ssl keyfile full path') parser.add_option('-c', '--sslcert', default=False, dest='certfile', help='require ssl certfile full path') (options, args) = parser.parse_args() hmac_key = options.hmac_key 
DistributeHandler.tokens = options.tokens urls = [ (r'/', PostHandler), (r'/token', TokenHandler), (r'/realtime/(.*)', DistributeHandler)] application = tornado.web.Application(urls, auto_reload=True) if options.keyfile and options.certfile: ssl_options = dict(certfile=options.certfile, keyfile=options.keyfile) else: ssl_options = None http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options) logger.debug('listening on %s:%s',options.address,options.port) http_server.listen(int(options.port), address=options.address) tornado.ioloop.IOLoop.instance().start()
[ "# -*- coding: utf-8 -*-\n", "#!/usr/bin/env python\n", "\n", "\"\"\"\n", "This file is part of the web2py Web Framework\n", "Copyrighted by Massimo Di Pierro <mdip...@cs.depaul.edu>\n", "License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n", "\n", "Attention: Requires Chrome or Safari. For IE of Firefox you need https://github.com/gimite/web-socket-js\n", "\n", "1) install tornado (requires Tornado 3.0 or later)\n", "\n", " easy_install tornado\n", "\n", "2) start this app:\n", "\n", " python gluon/contrib/websocket_messaging.py -k mykey -p 8888\n", "\n", "3) from any web2py app you can post messages with\n", "\n", " from gluon.contrib.websocket_messaging import websocket_send\n", " websocket_send('http://127.0.0.1:8888', 'Hello World', 'mykey', 'mygroup')\n", "\n", "4) from any template you can receive them with\n", "\n", " <script>\n", " $(document).ready(function(){\n", "\t if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){alert(e.data)}))\n", "\n", "\t\t alert(\"html5 websocket not supported by your browser, try Google Chrome\");\n", " });\n", " </script>\n", "\n", "When the server posts a message, all clients connected to the page will popup an alert message\n", "Or if you want to send json messages and store evaluated json in a var called data:\n", "\n", " <script>\n", " $(document).ready(function(){\n", "\t var data;\n", "\t $.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){data=eval('('+e.data+')')});\n", " });\n", " </script>\n", "\n", "- All communications between web2py and websocket_messaging will be digitally signed with hmac.\n", "- All validation is handled on the web2py side and there is no need to modify websocket_messaging.py\n", "- Multiple web2py instances can talk with one or more websocket_messaging servers.\n", "- \"ws://127.0.0.1:8888/realtime/\" must be contain the IP of the websocket_messaging server.\n", "- Via group='mygroup' name you can support multiple groups of clients (think of many chat-rooms)\n", "\n", "\n", "Here is a complete sample web2py action:\n", "\n", "\tdef index():\n", "\t\tform=LOAD('default', 'ajax_form', ajax=True)\n", "\t\tscript=SCRIPT('''\n", "\t\t\tjQuery(document).ready(function(){\n", "\t\t\t var callback=function(e){alert(e.data)};\n", "\t\t\t if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', callback))\n", "\n", "\t\t\t\talert(\"html5 websocket not supported by your browser, try Google Chrome\");\n", "\t\t\t});\n", "\t\t''')\n", "\t\treturn dict(form=form, script=script)\n", "\n", "\tdef ajax_form():\n", "\t\tform=SQLFORM.factory(Field('message'))\n", "\t\tif form.accepts(request,session):\n", "\t\t\tfrom gluon.contrib.websocket_messaging import websocket_send\n", "\t\t\twebsocket_send(\n", "\t\t\t\t'http://127.0.0.1:8888', form.vars.message, 'mykey', 'mygroup')\n", "\t\treturn form\n", "\n", "https is possible too using 'https://127.0.0.1:8888' instead of 'http://127.0.0.1:8888', but need to\n", "be started with\n", "\n", " python gluon/contrib/websocket_messaging.py -k mykey -p 8888 -s keyfile.pem -c certfile.pem\n", "\n", "for secure websocket do:\n", "\n", " web2py_websocket('wss://127.0.0.1:8888/realtime/mygroup',callback)\n", "\n", "Acknowledgements:\n", "Tornado code inspired by http://thomas.pelletier.im/2010/08/websocket-tornado-redis/\n", "\n", "\"\"\"\n", "\n", "import tornado.httpserver\n", "import tornado.websocket\n", "import tornado.ioloop\n", "import tornado.web\n", "import hmac\n", "import sys\n", "import optparse\n", "import 
urllib\n", "import time\n", "\n", "listeners, names, tokens = {}, {}, {}\n", "\n", "import logging\n", "\n", "def websocket_send(url, message, hmac_key=None, group='default'):\n", "\tsig = hmac_key and hmac.new(hmac_key, message).hexdigest() or ''\n", "\tparams = urllib.urlencode(\n", "\t\t{'message': message, 'signature': sig, 'group': group})\n", "\tf = urllib.urlopen(url, params)\n", "\tdata = f.read()\n", "\tf.close()\n", "\treturn data\n", "\n", "\n", "class PostHandler(tornado.web.RequestHandler):\n", "\t\"\"\"\n", "\tonly authorized parties can post messages\n", "\t\"\"\"\n", "\tdef post(self):\n", "\t\tlogger.info('PostHandler')\n", "\t\tif hmac_key and not 'signature' in self.request.arguments:\n", "\t\t\tself.send_error(401)\n", "\t\tif 'message' in self.request.arguments:\n", "\t\t\tmessage = self.request.arguments['message'][0]\n", "\t\t\tgroup = self.request.arguments.get('group', ['default'])[0]\n", "\t\t\tprint '%s:MESSAGE to %s:%s' % (time.time(), group, message)\n", "\t\t\tif hmac_key:\n", "\t\t\t\tsignature = self.request.arguments['signature'][0]\n", "\t\t\t\tif not hmac.new(hmac_key, message).hexdigest() == signature:\n", "\t\t\t\t\tself.send_error(401)\n", "\t\t\tfor client in listeners.get(group, []):\n", "\t\t\t\tclient.write_message(message)\n", "\n", "\n", "class TokenHandler(tornado.web.RequestHandler):\n", "\t\"\"\"\n", "\tif running with -t post a token to allow a client to join using the token\n", "\tthe message here is the token (any uuid)\n", "\tallows only authorized parties to joins, for example, a chat\n", "\t\"\"\"\n", "\tdef post(self):\n", "\t\tlogger.info('TokenHandler')\n", "\t\tif hmac_key and not 'message' in self.request.arguments:\n", "\t\t\tself.send_error(401)\n", "\t\tif 'message' in self.request.arguments:\n", "\t\t\tmessage = self.request.arguments['message'][0]\n", "\t\t\tif hmac_key:\n", "\t\t\t\tsignature = self.request.arguments['signature'][0]\n", "\t\t\t\tif not hmac.new(hmac_key, message).hexdigest() == signature:\n", "\t\t\t\t\tself.send_error(401)\n", "\t\t\ttokens[message] = None\n", "\n", "\n", "class DistributeHandler(tornado.websocket.WebSocketHandler):\n", "\tdef open(self, params):\n", "\t\tlogger.info('DistributeHandler')\n", "\t\tgroup, token, name = params.split('/') + [None, None]\n", "\t\tself.group = group or 'default'\n", "\t\tself.token = token or 'none'\n", "\t\tself.name = name or 'anonymous'\n", "\t\t# only authorized parties can join\n", "\t\tif DistributeHandler.tokens:\n", "\t\t\tif not self.token in tokens or not token[self.token] is None:\n", "\t\t\t\tself.close()\n", "\t\t\telse:\n", "\t\t\t\ttokens[self.token] = self\n", "\t\tif not self.group in listeners:\n", "\t\t\tlisteners[self.group] = []\n", "\t\t# notify clients that a member has joined the groups\n", "\t\tfor client in listeners.get(self.group, []):\n", "\t\t\tclient.write_message('+' + self.name)\n", "\t\tlisteners[self.group].append(self)\n", "\t\tnames[self] = self.name\n", "\t\tprint '%s:CONNECT to %s' % (time.time(), self.group)\n", "\n", "\tdef on_message(self, message):\n", "\t\tpass\n", "\n", "\tdef on_close(self):\n", "\t\tif self.group in listeners:\n", "\t\t\tlisteners[self.group].remove(self)\n", "\t\tdel names[self]\n", "\t\t# notify clients that a member has left the groups\n", "\t\tfor client in listeners.get(self.group, []):\n", "\t\t\tclient.write_message('-' + self.name)\n", "\t\tprint '%s:DISCONNECT from %s' % (time.time(), self.group)\n", "\n", "def get_offline_logger():\n", "\tif 'logger' not in globals():\n", "\t\timport 
logging\n", "\t\tstream = logging.StreamHandler()\n", "\t\tstream.setFormatter(logging.Formatter('%(levelname)5s:%(module)15s.%(funcName)-20s:%(lineno)4d: %(message)s','%Y%m%d %H:%M:%S'))\n", "\t\tstream.setLevel(logging.DEBUG)\n", "\n", "\t\tlogger = logging.getLogger()\n", "\t\tlogger.setLevel(logging.DEBUG)\n", "\t\tlogger.addHandler(stream)\n", "\t\tglobals()['logger']=logger\n", "\treturn globals()['logger']\n", "\n", "# if your webserver is different from tornado server uncomment this\n", "# or override using something more restrictive:\n", "# http://tornado.readthedocs.org/en/latest/websocket.html#tornado.websocket.WebSocketHandler.check_origin\n", "# def check_origin(self, origin):\n", "#\treturn True\n", "\n", "if __name__ == \"__main__\":\n", "\tlogger = get_offline_logger()\n", "\tusage = __doc__\n", "\tversion = \"\"\n", "\tparser = optparse.OptionParser(usage, None, optparse.Option, version)\n", "\tparser.add_option('-p',\n", "\t\t\t\t\t '--port',\n", "\t\t\t\t\t default='8888',\n", "\t\t\t\t\t dest='port',\n", "\t\t\t\t\t help='socket')\n", "\tparser.add_option('-l',\n", "\t\t\t\t\t '--listen',\n", "\t\t\t\t\t default='0.0.0.0',\n", "\t\t\t\t\t dest='address',\n", "\t\t\t\t\t help='listener address')\n", "\tparser.add_option('-k',\n", "\t\t\t\t\t '--hmac_key',\n", "\t\t\t\t\t default='',\n", "\t\t\t\t\t dest='hmac_key',\n", "\t\t\t\t\t help='hmac_key')\n", "\tparser.add_option('-t',\n", "\t\t\t\t\t '--tokens',\n", "\t\t\t\t\t action='store_true',\n", "\t\t\t\t\t default=False,\n", "\t\t\t\t\t dest='tokens',\n", "\t\t\t\t\t help='require tockens to join')\n", "\tparser.add_option('-s',\n", "\t\t\t\t\t '--sslkey',\n", "\t\t\t\t\t default=False,\n", "\t\t\t\t\t dest='keyfile',\n", "\t\t\t\t\t help='require ssl keyfile full path')\n", "\tparser.add_option('-c',\n", "\t\t\t\t\t '--sslcert',\n", "\t\t\t\t\t default=False,\n", "\t\t\t\t\t dest='certfile',\n", "\t\t\t\t\t help='require ssl certfile full path')\n", "\t(options, args) = parser.parse_args()\n", "\thmac_key = options.hmac_key\n", "\tDistributeHandler.tokens = options.tokens\n", "\turls = [\n", "\t\t(r'/', PostHandler),\n", "\t\t(r'/token', TokenHandler),\n", "\t\t(r'/realtime/(.*)', DistributeHandler)]\n", "\tapplication = tornado.web.Application(urls, auto_reload=True)\n", "\tif options.keyfile and options.certfile:\n", "\t\tssl_options = dict(certfile=options.certfile, keyfile=options.keyfile)\n", "\telse:\n", "\t\tssl_options = None\n", "\thttp_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)\n", "\tlogger.debug('listening on %s:%s',options.address,options.port)\n", "\thttp_server.listen(int(options.port), address=options.address)\n", "\ttornado.ioloop.IOLoop.instance().start()\n" ]
[ 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0.02564102564102564, 0.14285714285714285, 0, 0, 0.010526315789473684, 0.011904761904761904, 0, 0, 0, 0.15384615384615385, 0.02727272727272727, 0.14285714285714285, 0, 0, 0.010416666666666666, 0.009900990099009901, 0.012048192771084338, 0.010869565217391304, 0.010309278350515464, 0, 0, 0, 0, 0.14285714285714285, 0.02127659574468085, 0.05, 0.02631578947368421, 0.043478260869565216, 0.03488372093023256, 0, 0.012658227848101266, 0.14285714285714285, 0.14285714285714285, 0.025, 0, 0.05555555555555555, 0.024390243902439025, 0.027777777777777776, 0.015625, 0.05263157894736842, 0.014705882352941176, 0.07142857142857142, 0, 0.009900990099009901, 0, 0, 0.021052631578947368, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667, 0, 0.015151515151515152, 0.030303030303030304, 0.03571428571428571, 0.017241379310344827, 0.030303030303030304, 0.058823529411764705, 0.09090909090909091, 0.07692307692307693, 0, 0, 0, 0.2, 0.023255813953488372, 0.2, 0.058823529411764705, 0.034482758620689655, 0.03278688524590164, 0.041666666666666664, 0.023809523809523808, 0.02, 0.015873015873015872, 0.015873015873015872, 0.0625, 0.01818181818181818, 0.015384615384615385, 0.038461538461538464, 0.023255813953488372, 0.029411764705882353, 0, 0, 0, 0.2, 0.013333333333333334, 0.023809523809523808, 0.016129032258064516, 0.2, 0.058823529411764705, 0.03333333333333333, 0.03389830508474576, 0.041666666666666664, 0.023809523809523808, 0.02, 0.0625, 0.01818181818181818, 0.015384615384615385, 0.038461538461538464, 0.038461538461538464, 0, 0, 0, 0.04, 0.02857142857142857, 0.017857142857142856, 0.029411764705882353, 0.03225806451612903, 0.029411764705882353, 0.02702702702702703, 0.03225806451612903, 0.03076923076923077, 0.058823529411764705, 0.1111111111111111, 0.03333333333333333, 0.058823529411764705, 0.03333333333333333, 0.01818181818181818, 0.02127659574468085, 0.024390243902439025, 0.02702702702702703, 0.038461538461538464, 0.01818181818181818, 0, 0.03125, 0.14285714285714285, 0, 0.047619047619047616, 0.03333333333333333, 0.02631578947368421, 0.05555555555555555, 0.018867924528301886, 0.02127659574468085, 0.024390243902439025, 0.016666666666666666, 0, 0.038461538461538464, 0.03225806451612903, 0.058823529411764705, 0.02857142857142857, 0.022900763358778626, 0.030303030303030304, 0, 0.03225806451612903, 0.030303030303030304, 0.03571428571428571, 0.06896551724137931, 0.03571428571428571, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0.037037037037037035, 0.03225806451612903, 0.058823529411764705, 0.07142857142857142, 0.014084507042253521, 0.04, 0.17647058823529413, 0.13043478260869565, 0.15, 0.13636363636363635, 0.04, 0.15789473684210525, 0.11538461538461539, 0.13043478260869565, 0.09375, 0.04, 0.14285714285714285, 0.15789473684210525, 0.125, 0.125, 0.04, 0.15789473684210525, 0.10714285714285714, 0.13636363636363635, 0.13636363636363635, 0.07692307692307693, 0.04, 0.15789473684210525, 0.13636363636363635, 0.13043478260869565, 0.06666666666666667, 0.04, 0.15, 0.13636363636363635, 0.125, 0.06521739130434782, 0.02564102564102564, 0.034482758620689655, 0.023255813953488372, 0.1, 0.043478260869565216, 0.034482758620689655, 0.023809523809523808, 0.015873015873015872, 0.023809523809523808, 0.0136986301369863, 0.14285714285714285, 0.047619047619047616, 0.024096385542168676, 0.046153846153846156, 0.015625, 0.023809523809523808 ]
254
0.037969
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class ipset_nsip_binding(base_resource) : """ Binding class showing the nsip that can be bound to ipset. """ def __init__(self) : self._ipaddress = "" self._name = "" self.___count = 0 @property def name(self) : ur"""Name of the IP set to which to bind IP addresses.<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : ur"""Name of the IP set to which to bind IP addresses.<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def ipaddress(self) : ur"""One or more IP addresses bound to the IP set.<br/>Minimum length = 1. """ try : return self._ipaddress except Exception as e: raise e @ipaddress.setter def ipaddress(self, ipaddress) : ur"""One or more IP addresses bound to the IP set.<br/>Minimum length = 1 """ try : self._ipaddress = ipaddress except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(ipset_nsip_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.ipset_nsip_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : try : if resource and type(resource) is not list : updateresource = ipset_nsip_binding() updateresource.name = resource.name updateresource.ipaddress = resource.ipaddress return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [ipset_nsip_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].name = resource[i].name updateresources[i].ipaddress = resource[i].ipaddress return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : try : if resource and type(resource) is not list : deleteresource = ipset_nsip_binding() deleteresource.name = resource.name deleteresource.ipaddress = resource.ipaddress return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [ipset_nsip_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].name = resource[i].name deleteresources[i].ipaddress = resource[i].ipaddress return cls.delete_bulk_request(client, deleteresources) except Exception as e : raise e @classmethod def get(cls, service, name) : ur""" Use this API to fetch ipset_nsip_binding resources. """ try : obj = ipset_nsip_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, name, filter_) : ur""" Use this API to fetch filtered set of ipset_nsip_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = ipset_nsip_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service, name) : ur""" Use this API to count ipset_nsip_binding resources configued on NetScaler. """ try : obj = ipset_nsip_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, name, filter_) : ur""" Use this API to count the filtered set of ipset_nsip_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". 
""" try : obj = ipset_nsip_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class ipset_nsip_binding_response(base_response) : def __init__(self, length=1) : self.ipset_nsip_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.ipset_nsip_binding = [ipset_nsip_binding() for _ in range(length)]
[ "#\n", "# Copyright (c) 2008-2015 Citrix Systems, Inc.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\")\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.\n", "#\n", "\n", "from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource\n", "from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response\n", "from nssrc.com.citrix.netscaler.nitro.service.options import options\n", "from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception\n", "\n", "from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util\n", "\n", "class ipset_nsip_binding(base_resource) :\n", "\t\"\"\" Binding class showing the nsip that can be bound to ipset.\n", "\t\"\"\"\n", "\tdef __init__(self) :\n", "\t\tself._ipaddress = \"\"\n", "\t\tself._name = \"\"\n", "\t\tself.___count = 0\n", "\n", "\t@property\n", "\tdef name(self) :\n", "\t\tur\"\"\"Name of the IP set to which to bind IP addresses.<br/>Minimum length = 1.\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\treturn self._name\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@name.setter\n", "\tdef name(self, name) :\n", "\t\tur\"\"\"Name of the IP set to which to bind IP addresses.<br/>Minimum length = 1\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tself._name = name\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@property\n", "\tdef ipaddress(self) :\n", "\t\tur\"\"\"One or more IP addresses bound to the IP set.<br/>Minimum length = 1.\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\treturn self._ipaddress\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@ipaddress.setter\n", "\tdef ipaddress(self, ipaddress) :\n", "\t\tur\"\"\"One or more IP addresses bound to the IP set.<br/>Minimum length = 1\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tself._ipaddress = ipaddress\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\tdef _get_nitro_response(self, service, response) :\n", "\t\tur\"\"\" converts nitro response into object and returns the object array in case of get request.\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tresult = service.payload_formatter.string_to_resource(ipset_nsip_binding_response, response, self.__class__.__name__)\n", "\t\t\tif(result.errorcode != 0) :\n", "\t\t\t\tif (result.errorcode == 444) :\n", "\t\t\t\t\tservice.clear_session(self)\n", "\t\t\t\tif result.severity :\n", "\t\t\t\t\tif (result.severity == \"ERROR\") :\n", "\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n", "\t\t\t\telse :\n", "\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n", "\t\t\treturn result.ipset_nsip_binding\n", "\t\texcept Exception as e :\n", "\t\t\traise e\n", "\n", "\tdef _get_object_name(self) :\n", "\t\tur\"\"\" Returns the value of object identifier argument\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tif self.name is not None :\n", "\t\t\t\treturn str(self.name)\n", "\t\t\treturn None\n", "\t\texcept Exception as e :\n", "\t\t\traise e\n", "\n", 
"\n", "\n", "\t@classmethod\n", "\tdef add(cls, client, resource) :\n", "\t\ttry :\n", "\t\t\tif resource and type(resource) is not list :\n", "\t\t\t\tupdateresource = ipset_nsip_binding()\n", "\t\t\t\tupdateresource.name = resource.name\n", "\t\t\t\tupdateresource.ipaddress = resource.ipaddress\n", "\t\t\t\treturn updateresource.update_resource(client)\n", "\t\t\telse :\n", "\t\t\t\tif resource and len(resource) > 0 :\n", "\t\t\t\t\tupdateresources = [ipset_nsip_binding() for _ in range(len(resource))]\n", "\t\t\t\t\tfor i in range(len(resource)) :\n", "\t\t\t\t\t\tupdateresources[i].name = resource[i].name\n", "\t\t\t\t\t\tupdateresources[i].ipaddress = resource[i].ipaddress\n", "\t\t\t\treturn cls.update_bulk_request(client, updateresources)\n", "\t\texcept Exception as e :\n", "\t\t\traise e\n", "\n", "\t@classmethod\n", "\tdef delete(cls, client, resource) :\n", "\t\ttry :\n", "\t\t\tif resource and type(resource) is not list :\n", "\t\t\t\tdeleteresource = ipset_nsip_binding()\n", "\t\t\t\tdeleteresource.name = resource.name\n", "\t\t\t\tdeleteresource.ipaddress = resource.ipaddress\n", "\t\t\t\treturn deleteresource.delete_resource(client)\n", "\t\t\telse :\n", "\t\t\t\tif resource and len(resource) > 0 :\n", "\t\t\t\t\tdeleteresources = [ipset_nsip_binding() for _ in range(len(resource))]\n", "\t\t\t\t\tfor i in range(len(resource)) :\n", "\t\t\t\t\t\tdeleteresources[i].name = resource[i].name\n", "\t\t\t\t\t\tdeleteresources[i].ipaddress = resource[i].ipaddress\n", "\t\t\t\treturn cls.delete_bulk_request(client, deleteresources)\n", "\t\texcept Exception as e :\n", "\t\t\traise e\n", "\n", "\t@classmethod\n", "\tdef get(cls, service, name) :\n", "\t\tur\"\"\" Use this API to fetch ipset_nsip_binding resources.\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tobj = ipset_nsip_binding()\n", "\t\t\tobj.name = name\n", "\t\t\tresponse = obj.get_resources(service)\n", "\t\t\treturn response\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@classmethod\n", "\tdef get_filtered(cls, service, name, filter_) :\n", "\t\tur\"\"\" Use this API to fetch filtered set of ipset_nsip_binding resources.\n", "\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tobj = ipset_nsip_binding()\n", "\t\t\tobj.name = name\n", "\t\t\toption_ = options()\n", "\t\t\toption_.filter = filter_\n", "\t\t\tresponse = obj.getfiltered(service, option_)\n", "\t\t\treturn response\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@classmethod\n", "\tdef count(cls, service, name) :\n", "\t\tur\"\"\" Use this API to count ipset_nsip_binding resources configued on NetScaler.\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tobj = ipset_nsip_binding()\n", "\t\t\tobj.name = name\n", "\t\t\toption_ = options()\n", "\t\t\toption_.count = True\n", "\t\t\tresponse = obj.get_resources(service, option_)\n", "\t\t\tif response :\n", "\t\t\t\treturn response[0].__dict__['___count']\n", "\t\t\treturn 0\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "\t@classmethod\n", "\tdef count_filtered(cls, service, name, filter_) :\n", "\t\tur\"\"\" Use this API to count the filtered set of ipset_nsip_binding resources.\n", "\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n", "\t\t\"\"\"\n", "\t\ttry :\n", "\t\t\tobj = ipset_nsip_binding()\n", "\t\t\tobj.name = name\n", "\t\t\toption_ = options()\n", "\t\t\toption_.count = True\n", "\t\t\toption_.filter = filter_\n", "\t\t\tresponse = obj.getfiltered(service, option_)\n", 
"\t\t\tif response :\n", "\t\t\t\treturn response[0].__dict__['___count']\n", "\t\t\treturn 0\n", "\t\texcept Exception as e:\n", "\t\t\traise e\n", "\n", "class ipset_nsip_binding_response(base_response) :\n", "\tdef __init__(self, length=1) :\n", "\t\tself.ipset_nsip_binding = []\n", "\t\tself.errorcode = 0\n", "\t\tself.message = \"\"\n", "\t\tself.severity = \"\"\n", "\t\tself.sessionid = \"\"\n", "\t\tself.ipset_nsip_binding = [ipset_nsip_binding() for _ in range(length)]\n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0.011494252873563218, 0, 0.011494252873563218, 0, 0, 0, 0.047619047619047616, 0.015625, 0.2, 0.09090909090909091, 0.043478260869565216, 0.05555555555555555, 0.05, 0, 0.09090909090909091, 0.1111111111111111, 0.024390243902439025, 0.16666666666666666, 0.25, 0.047619047619047616, 0.04, 0.09090909090909091, 0, 0.07142857142857142, 0.08333333333333333, 0.024691358024691357, 0.16666666666666666, 0.25, 0.047619047619047616, 0.04, 0.09090909090909091, 0, 0.09090909090909091, 0.08695652173913043, 0.01282051282051282, 0.16666666666666666, 0.25, 0.038461538461538464, 0.04, 0.09090909090909091, 0, 0.05263157894736842, 0.058823529411764705, 0.012987012987012988, 0.16666666666666666, 0.25, 0.03225806451612903, 0.04, 0.09090909090909091, 0, 0.038461538461538464, 0.020618556701030927, 0.16666666666666666, 0.25, 0.01652892561983471, 0.06451612903225806, 0.05714285714285714, 0.030303030303030304, 0.08, 0.05128205128205128, 0.02247191011235955, 0.18181818181818182, 0.022727272727272728, 0.027777777777777776, 0.07692307692307693, 0.09090909090909091, 0, 0.06666666666666667, 0.017857142857142856, 0.16666666666666666, 0.25, 0.06666666666666667, 0.038461538461538464, 0.06666666666666667, 0.07692307692307693, 0.09090909090909091, 0, 0, 0, 0.14285714285714285, 0.058823529411764705, 0.25, 0.041666666666666664, 0.023809523809523808, 0.025, 0.02, 0.02, 0.2, 0.05, 0.013157894736842105, 0.05405405405405406, 0.02040816326530612, 0.01694915254237288, 0.016666666666666666, 0.07692307692307693, 0.09090909090909091, 0, 0.07142857142857142, 0.05405405405405406, 0.25, 0.041666666666666664, 0.023809523809523808, 0.025, 0.02, 0.02, 0.2, 0.05, 0.013157894736842105, 0.05405405405405406, 0.02040816326530612, 0.01694915254237288, 0.016666666666666666, 0.07692307692307693, 0.09090909090909091, 0, 0.07142857142857142, 0.06451612903225806, 0.016666666666666666, 0.16666666666666666, 0.25, 0.03333333333333333, 0.05263157894736842, 0.024390243902439025, 0.05263157894736842, 0.04, 0.09090909090909091, 0, 0.07142857142857142, 0.04081632653061224, 0.013157894736842105, 0.0136986301369863, 0.16666666666666666, 0.25, 0.03333333333333333, 0.05263157894736842, 0.043478260869565216, 0.03571428571428571, 0.020833333333333332, 0.05263157894736842, 0.04, 0.09090909090909091, 0, 0.07142857142857142, 0.06060606060606061, 0.024096385542168676, 0.16666666666666666, 0.25, 0.03333333333333333, 0.05263157894736842, 0.043478260869565216, 0.041666666666666664, 0.02, 0.11764705882352941, 0.022727272727272728, 0.08333333333333333, 0.04, 0.09090909090909091, 0, 0.07142857142857142, 0.0392156862745098, 0.0125, 0.0136986301369863, 0.16666666666666666, 0.25, 0.03333333333333333, 0.05263157894736842, 0.043478260869565216, 0.041666666666666664, 0.03571428571428571, 0.020833333333333332, 0.11764705882352941, 0.022727272727272728, 0.08333333333333333, 0.04, 0.09090909090909091, 0, 0.0392156862745098, 0.0625, 0.03225806451612903, 0.047619047619047616, 0.05, 0.047619047619047616, 0.045454545454545456, 0.013513513513513514, 1 ]
202
0.065352
"""The tests for the Tasmota cover platform.""" import copy import json from hatasmota.utils import ( get_topic_stat_result, get_topic_stat_status, get_topic_tele_sensor, get_topic_tele_will, ) from homeassistant.components import cover from homeassistant.components.tasmota.const import DEFAULT_PREFIX from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNKNOWN from .test_common import ( DEFAULT_CONFIG, help_test_availability, help_test_availability_discovery_update, help_test_availability_poll_state, help_test_availability_when_connection_lost, help_test_discovery_device_remove, help_test_discovery_removal, help_test_discovery_update_unchanged, help_test_entity_id_update_discovery_update, help_test_entity_id_update_subscriptions, ) from tests.async_mock import patch from tests.common import async_fire_mqtt_message async def test_missing_relay(hass, mqtt_mock, setup_tasmota): """Test no cover is discovered if relays are missing.""" async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 3 config["rl"][1] = 3 mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("cover.tasmota_cover_1") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("cover.tasmota_cover_1") assert state.state == STATE_UNKNOWN assert ( state.attributes["supported_features"] == cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE | cover.SUPPORT_STOP | cover.SUPPORT_SET_POSITION ) assert not state.attributes.get(ATTR_ASSUMED_STATE) # Periodic updates async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":54,"Direction":-1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 54 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":100,"Direction":1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 100 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":0,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":1,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 1 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":100,"Direction":0}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 # State poll response async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":54,"Direction":-1}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 54 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":100,"Direction":1}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 100 async_fire_mqtt_message( hass, 
"tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":0,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":1,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 1 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":100,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 # Command response async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":54,"Direction":-1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 54 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":100,"Direction":1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 100 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":0,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":1,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 1 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":100,"Direction":0}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 async def test_controlling_state_via_mqtt_inverted(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 3 config["rl"][1] = 3 config["sho"] = [1] # Inverted cover mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("cover.tasmota_cover_1") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("cover.tasmota_cover_1") assert state.state == STATE_UNKNOWN assert ( state.attributes["supported_features"] == cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE | cover.SUPPORT_STOP | cover.SUPPORT_SET_POSITION ) assert not state.attributes.get(ATTR_ASSUMED_STATE) # Periodic updates async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":54,"Direction":-1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 46 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":100,"Direction":1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":0,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 
async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":99,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 1 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":100,"Direction":0}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 # State poll response async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":54,"Direction":-1}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 46 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":100,"Direction":1}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":0,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":99,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 1 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Shutter1":{"Position":100,"Direction":0}}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 # Command response async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":54,"Direction":-1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "opening" assert state.attributes["current_position"] == 46 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":100,"Direction":1}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closing" assert state.attributes["current_position"] == 0 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":0,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 100 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":1,"Direction":0}}' ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "open" assert state.attributes["current_position"] == 99 async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":100,"Direction":0}}', ) state = hass.states.get("cover.tasmota_cover_1") assert state.state == "closed" assert state.attributes["current_position"] == 0 async def call_service(hass, entity_id, service, **kwargs): """Call a fan service.""" await hass.services.async_call( cover.DOMAIN, service, {"entity_id": entity_id, **kwargs}, blocking=True, ) async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota): """Test the sending MQTT commands.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, 
"tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("cover.test_cover_1") assert state.state == STATE_UNKNOWN await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Close the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "close_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterClose1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Tasmota is not optimistic, the state should still be unknown state = hass.states.get("cover.test_cover_1") assert state.state == STATE_UNKNOWN # Open the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "open_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterOpen1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Stop the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "stop_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterStop1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Set position and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "set_cover_position", position=0) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterPosition1", "0", 0, False ) mqtt_mock.async_publish.reset_mock() # Set position and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "set_cover_position", position=99) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterPosition1", "99", 0, False ) mqtt_mock.async_publish.reset_mock() async def test_sending_mqtt_commands_inverted(hass, mqtt_mock, setup_tasmota): """Test the sending MQTT commands.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 config["sho"] = [1] # Inverted cover mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("cover.test_cover_1") assert state.state == STATE_UNKNOWN await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Close the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "close_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterClose1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Tasmota is not optimistic, the state should still be unknown state = hass.states.get("cover.test_cover_1") assert state.state == STATE_UNKNOWN # Open the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "open_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterOpen1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Stop the cover and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "stop_cover") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterStop1", "", 0, False ) mqtt_mock.async_publish.reset_mock() # Set position and verify MQTT message is sent await call_service(hass, "cover.test_cover_1", "set_cover_position", position=0) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterPosition1", "100", 0, False ) mqtt_mock.async_publish.reset_mock() # Set position and verify MQTT message is sent await call_service(hass, 
"cover.test_cover_1", "set_cover_position", position=99) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/ShutterPosition1", "1", 0, False ) mqtt_mock.async_publish.reset_mock() async def test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test availability after MQTT disconnection.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 await help_test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, cover.DOMAIN, config, entity_id="test_cover_1", ) async def test_availability(hass, mqtt_mock, setup_tasmota): """Test availability.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 await help_test_availability( hass, mqtt_mock, cover.DOMAIN, config, entity_id="test_cover_1" ) async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota): """Test availability discovery update.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 await help_test_availability_discovery_update( hass, mqtt_mock, cover.DOMAIN, config, entity_id="test_cover_1" ) async def test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test polling after MQTT connection (re)established.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 3 config["rl"][1] = 3 poll_topic = "tasmota_49A3BC/cmnd/STATUS" await help_test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, cover.DOMAIN, config, poll_topic, "10" ) async def test_discovery_removal_cover(hass, mqtt_mock, caplog, setup_tasmota): """Test removal of discovered cover.""" config1 = copy.deepcopy(DEFAULT_CONFIG) config1["dn"] = "Test" config1["rl"][0] = 3 config1["rl"][1] = 3 config2 = copy.deepcopy(DEFAULT_CONFIG) config2["dn"] = "Test" config2["rl"][0] = 0 config2["rl"][1] = 0 await help_test_discovery_removal( hass, mqtt_mock, caplog, cover.DOMAIN, config1, config2, entity_id="test_cover_1", name="Test cover 1", ) async def test_discovery_update_unchanged_cover(hass, mqtt_mock, caplog, setup_tasmota): """Test update of discovered cover.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 with patch( "homeassistant.components.tasmota.cover.TasmotaCover.discovery_update" ) as discovery_update: await help_test_discovery_update_unchanged( hass, mqtt_mock, caplog, cover.DOMAIN, config, discovery_update, entity_id="test_cover_1", name="Test cover 1", ) async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota): """Test device registry remove.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 unique_id = f"{DEFAULT_CONFIG['mac']}_cover_shutter_0" await help_test_discovery_device_remove( hass, mqtt_mock, cover.DOMAIN, unique_id, config ) async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota): """Test MQTT subscriptions are managed when entity_id is updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 topics = [ get_topic_stat_result(config), get_topic_tele_sensor(config), get_topic_stat_status(config, 10), get_topic_tele_will(config), ] await help_test_entity_id_update_subscriptions( hass, mqtt_mock, cover.DOMAIN, config, topics, entity_id="test_cover_1" ) async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota): """Test MQTT discovery update when entity_id is 
updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["dn"] = "Test" config["rl"][0] = 3 config["rl"][1] = 3 await help_test_entity_id_update_discovery_update( hass, mqtt_mock, cover.DOMAIN, config, entity_id="test_cover_1" )
[ "\"\"\"The tests for the Tasmota cover platform.\"\"\"\n", "import copy\n", "import json\n", "\n", "from hatasmota.utils import (\n", " get_topic_stat_result,\n", " get_topic_stat_status,\n", " get_topic_tele_sensor,\n", " get_topic_tele_will,\n", ")\n", "\n", "from homeassistant.components import cover\n", "from homeassistant.components.tasmota.const import DEFAULT_PREFIX\n", "from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNKNOWN\n", "\n", "from .test_common import (\n", " DEFAULT_CONFIG,\n", " help_test_availability,\n", " help_test_availability_discovery_update,\n", " help_test_availability_poll_state,\n", " help_test_availability_when_connection_lost,\n", " help_test_discovery_device_remove,\n", " help_test_discovery_removal,\n", " help_test_discovery_update_unchanged,\n", " help_test_entity_id_update_discovery_update,\n", " help_test_entity_id_update_subscriptions,\n", ")\n", "\n", "from tests.async_mock import patch\n", "from tests.common import async_fire_mqtt_message\n", "\n", "\n", "async def test_missing_relay(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test no cover is discovered if relays are missing.\"\"\"\n", "\n", "\n", "async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test state update via MQTT.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " mac = config[\"mac\"]\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " f\"{DEFAULT_PREFIX}/{mac}/config\",\n", " json.dumps(config),\n", " )\n", " await hass.async_block_till_done()\n", "\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"unavailable\"\n", " assert not state.attributes.get(ATTR_ASSUMED_STATE)\n", "\n", " async_fire_mqtt_message(hass, \"tasmota_49A3BC/tele/LWT\", \"Online\")\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == STATE_UNKNOWN\n", " assert (\n", " state.attributes[\"supported_features\"]\n", " == cover.SUPPORT_OPEN\n", " | cover.SUPPORT_CLOSE\n", " | cover.SUPPORT_STOP\n", " | cover.SUPPORT_SET_POSITION\n", " )\n", " assert not state.attributes.get(ATTR_ASSUMED_STATE)\n", "\n", " # Periodic updates\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 54\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/tele/SENSOR\", '{\"Shutter1\":{\"Position\":0,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/tele/SENSOR\", '{\"Shutter1\":{\"Position\":1,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 1\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":0}}',\n", 
" )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " # State poll response\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 54\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":100,\"Direction\":1}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":0,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":1,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 1\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":100,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " # Command response\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 54\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/stat/RESULT\", '{\"Shutter1\":{\"Position\":0,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/stat/RESULT\", '{\"Shutter1\":{\"Position\":1,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 1\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":0}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", "\n", "async def test_controlling_state_via_mqtt_inverted(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test state update via MQTT.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " 
config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " config[\"sho\"] = [1] # Inverted cover\n", " mac = config[\"mac\"]\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " f\"{DEFAULT_PREFIX}/{mac}/config\",\n", " json.dumps(config),\n", " )\n", " await hass.async_block_till_done()\n", "\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"unavailable\"\n", " assert not state.attributes.get(ATTR_ASSUMED_STATE)\n", "\n", " async_fire_mqtt_message(hass, \"tasmota_49A3BC/tele/LWT\", \"Online\")\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == STATE_UNKNOWN\n", " assert (\n", " state.attributes[\"supported_features\"]\n", " == cover.SUPPORT_OPEN\n", " | cover.SUPPORT_CLOSE\n", " | cover.SUPPORT_STOP\n", " | cover.SUPPORT_SET_POSITION\n", " )\n", " assert not state.attributes.get(ATTR_ASSUMED_STATE)\n", "\n", " # Periodic updates\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 46\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/tele/SENSOR\", '{\"Shutter1\":{\"Position\":0,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/tele/SENSOR\", '{\"Shutter1\":{\"Position\":99,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 1\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/tele/SENSOR\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":0}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " # State poll response\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 46\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":100,\"Direction\":1}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":0,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " 
'{\"StatusSNS\":{\"Shutter1\":{\"Position\":99,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 1\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/STATUS10\",\n", " '{\"StatusSNS\":{\"Shutter1\":{\"Position\":100,\"Direction\":0}}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " # Command response\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":54,\"Direction\":-1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"opening\"\n", " assert state.attributes[\"current_position\"] == 46\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":1}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closing\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/stat/RESULT\", '{\"Shutter1\":{\"Position\":0,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 100\n", "\n", " async_fire_mqtt_message(\n", " hass, \"tasmota_49A3BC/stat/RESULT\", '{\"Shutter1\":{\"Position\":1,\"Direction\":0}}'\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"open\"\n", " assert state.attributes[\"current_position\"] == 99\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " \"tasmota_49A3BC/stat/RESULT\",\n", " '{\"Shutter1\":{\"Position\":100,\"Direction\":0}}',\n", " )\n", " state = hass.states.get(\"cover.tasmota_cover_1\")\n", " assert state.state == \"closed\"\n", " assert state.attributes[\"current_position\"] == 0\n", "\n", "\n", "async def call_service(hass, entity_id, service, **kwargs):\n", " \"\"\"Call a fan service.\"\"\"\n", " await hass.services.async_call(\n", " cover.DOMAIN,\n", " service,\n", " {\"entity_id\": entity_id, **kwargs},\n", " blocking=True,\n", " )\n", "\n", "\n", "async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test the sending MQTT commands.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " mac = config[\"mac\"]\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " f\"{DEFAULT_PREFIX}/{mac}/config\",\n", " json.dumps(config),\n", " )\n", " await hass.async_block_till_done()\n", "\n", " async_fire_mqtt_message(hass, \"tasmota_49A3BC/tele/LWT\", \"Online\")\n", " state = hass.states.get(\"cover.test_cover_1\")\n", " assert state.state == STATE_UNKNOWN\n", " await hass.async_block_till_done()\n", " await hass.async_block_till_done()\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Close the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"close_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterClose1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Tasmota is not optimistic, the state should still be unknown\n", " state = hass.states.get(\"cover.test_cover_1\")\n", " assert 
state.state == STATE_UNKNOWN\n", "\n", " # Open the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"open_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterOpen1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Stop the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"stop_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterStop1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Set position and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"set_cover_position\", position=0)\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterPosition1\", \"0\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Set position and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"set_cover_position\", position=99)\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterPosition1\", \"99\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", "\n", "async def test_sending_mqtt_commands_inverted(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test the sending MQTT commands.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " config[\"sho\"] = [1] # Inverted cover\n", " mac = config[\"mac\"]\n", "\n", " async_fire_mqtt_message(\n", " hass,\n", " f\"{DEFAULT_PREFIX}/{mac}/config\",\n", " json.dumps(config),\n", " )\n", " await hass.async_block_till_done()\n", "\n", " async_fire_mqtt_message(hass, \"tasmota_49A3BC/tele/LWT\", \"Online\")\n", " state = hass.states.get(\"cover.test_cover_1\")\n", " assert state.state == STATE_UNKNOWN\n", " await hass.async_block_till_done()\n", " await hass.async_block_till_done()\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Close the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"close_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterClose1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Tasmota is not optimistic, the state should still be unknown\n", " state = hass.states.get(\"cover.test_cover_1\")\n", " assert state.state == STATE_UNKNOWN\n", "\n", " # Open the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"open_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterOpen1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Stop the cover and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"stop_cover\")\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterStop1\", \"\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Set position and verify MQTT message is sent\n", " await call_service(hass, \"cover.test_cover_1\", \"set_cover_position\", position=0)\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterPosition1\", \"100\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", " # Set position and verify MQTT message is sent\n", " await 
call_service(hass, \"cover.test_cover_1\", \"set_cover_position\", position=99)\n", " mqtt_mock.async_publish.assert_called_once_with(\n", " \"tasmota_49A3BC/cmnd/ShutterPosition1\", \"1\", 0, False\n", " )\n", " mqtt_mock.async_publish.reset_mock()\n", "\n", "\n", "async def test_availability_when_connection_lost(\n", " hass, mqtt_client_mock, mqtt_mock, setup_tasmota\n", "):\n", " \"\"\"Test availability after MQTT disconnection.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " await help_test_availability_when_connection_lost(\n", " hass,\n", " mqtt_client_mock,\n", " mqtt_mock,\n", " cover.DOMAIN,\n", " config,\n", " entity_id=\"test_cover_1\",\n", " )\n", "\n", "\n", "async def test_availability(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test availability.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " await help_test_availability(\n", " hass, mqtt_mock, cover.DOMAIN, config, entity_id=\"test_cover_1\"\n", " )\n", "\n", "\n", "async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test availability discovery update.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " await help_test_availability_discovery_update(\n", " hass, mqtt_mock, cover.DOMAIN, config, entity_id=\"test_cover_1\"\n", " )\n", "\n", "\n", "async def test_availability_poll_state(\n", " hass, mqtt_client_mock, mqtt_mock, setup_tasmota\n", "):\n", " \"\"\"Test polling after MQTT connection (re)established.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " poll_topic = \"tasmota_49A3BC/cmnd/STATUS\"\n", " await help_test_availability_poll_state(\n", " hass, mqtt_client_mock, mqtt_mock, cover.DOMAIN, config, poll_topic, \"10\"\n", " )\n", "\n", "\n", "async def test_discovery_removal_cover(hass, mqtt_mock, caplog, setup_tasmota):\n", " \"\"\"Test removal of discovered cover.\"\"\"\n", " config1 = copy.deepcopy(DEFAULT_CONFIG)\n", " config1[\"dn\"] = \"Test\"\n", " config1[\"rl\"][0] = 3\n", " config1[\"rl\"][1] = 3\n", " config2 = copy.deepcopy(DEFAULT_CONFIG)\n", " config2[\"dn\"] = \"Test\"\n", " config2[\"rl\"][0] = 0\n", " config2[\"rl\"][1] = 0\n", "\n", " await help_test_discovery_removal(\n", " hass,\n", " mqtt_mock,\n", " caplog,\n", " cover.DOMAIN,\n", " config1,\n", " config2,\n", " entity_id=\"test_cover_1\",\n", " name=\"Test cover 1\",\n", " )\n", "\n", "\n", "async def test_discovery_update_unchanged_cover(hass, mqtt_mock, caplog, setup_tasmota):\n", " \"\"\"Test update of discovered cover.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " with patch(\n", " \"homeassistant.components.tasmota.cover.TasmotaCover.discovery_update\"\n", " ) as discovery_update:\n", " await help_test_discovery_update_unchanged(\n", " hass,\n", " mqtt_mock,\n", " caplog,\n", " cover.DOMAIN,\n", " config,\n", " discovery_update,\n", " entity_id=\"test_cover_1\",\n", " name=\"Test cover 1\",\n", " )\n", "\n", "\n", "async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test device registry remove.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] 
= 3\n", " unique_id = f\"{DEFAULT_CONFIG['mac']}_cover_shutter_0\"\n", " await help_test_discovery_device_remove(\n", " hass, mqtt_mock, cover.DOMAIN, unique_id, config\n", " )\n", "\n", "\n", "async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test MQTT subscriptions are managed when entity_id is updated.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " topics = [\n", " get_topic_stat_result(config),\n", " get_topic_tele_sensor(config),\n", " get_topic_stat_status(config, 10),\n", " get_topic_tele_will(config),\n", " ]\n", " await help_test_entity_id_update_subscriptions(\n", " hass, mqtt_mock, cover.DOMAIN, config, topics, entity_id=\"test_cover_1\"\n", " )\n", "\n", "\n", "async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):\n", " \"\"\"Test MQTT discovery update when entity_id is updated.\"\"\"\n", " config = copy.deepcopy(DEFAULT_CONFIG)\n", " config[\"dn\"] = \"Test\"\n", " config[\"rl\"][0] = 3\n", " config[\"rl\"][1] = 3\n", " await help_test_entity_id_update_discovery_update(\n", " hass, mqtt_mock, cover.DOMAIN, config, entity_id=\"test_cover_1\"\n", " )\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0 ]
629
0.000294
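The assertions in the cover tests above pin down how a Tasmota shutter report is translated into a Home Assistant cover state: `Direction` decides between opening, closing and idle, and for an inverted shutter (`"sho": [1]`) the reported position is mirrored. A minimal stand-alone sketch of that mapping, using a hypothetical `cover_state` helper rather than the real hatasmota/Home Assistant code, behaves like this:

# Illustrative only: mirrors the expectations asserted in the cover tests above.
# `tasmota_position` and `direction` come from a payload such as
# {"Shutter1": {"Position": 54, "Direction": -1}}.
def cover_state(tasmota_position, direction, inverted=False):
    """Return (state, current_position) the way the tests above expect."""
    # For an inverted shutter ("sho": [1]) Home Assistant's position is mirrored.
    ha_position = 100 - tasmota_position if inverted else tasmota_position
    if direction == -1:
        state = "opening" if inverted else "closing"
    elif direction == 1:
        state = "closing" if inverted else "opening"
    else:
        state = "closed" if ha_position == 0 else "open"
    return state, ha_position

# These match the assertions in the tests above.
assert cover_state(54, -1) == ("closing", 54)
assert cover_state(0, 0) == ("closed", 0)
assert cover_state(54, -1, inverted=True) == ("opening", 46)
assert cover_state(100, 0, inverted=True) == ("closed", 0)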
#!/usr/bin/env python # -*- coding: utf-8 -*- import datetime import os import sys from setuptools import setup from setuptools.command.develop import develop with open('README.rst') as readme_file: readme = readme_file.read() def version(): return datetime.datetime.utcnow().strftime('%Y.%m.%d') class SetupDevelop(develop): """ Setup the development environment with: `./setup.py develop` """ def finalize_options(self): if not os.getenv('VIRTUAL_ENV'): print('ERROR: You must be in a virtual environment', file=sys.stderr) sys.exit(1) develop.finalize_options(self) def run(self): develop.run(self) # Install the dev requirements print('>>> Install dev requirements') self.spawn('pip install --upgrade --requirement requirements/dev.txt'.split(' ')) print('<<< Install dev requirements') setup( name='btrsync', version=version(), description="Rsync + btrfs archiving utility.", long_description=readme, author='zeroxoneb', author_email='zeroxoneb@gmail.com', url='https://github.com/zeroxoneb/btrsync', packages=[ 'btrsync', ], package_dir={'btrsync': 'btrsync'}, entry_points={ 'console_scripts': [ 'btrsync = btrsync.cli:sync', ] }, include_package_data=True, install_requires=[ 'click==7.0', ], license="MIT", zip_safe=False, keywords='btrsync', classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', ], cmdclass={ 'develop': SetupDevelop } )
[ "#!/usr/bin/env python\n", "# -*- coding: utf-8 -*-\n", "\n", "import datetime\n", "import os\n", "import sys\n", "\n", "from setuptools import setup\n", "from setuptools.command.develop import develop\n", "\n", "\n", "with open('README.rst') as readme_file:\n", " readme = readme_file.read()\n", "\n", "\n", "def version():\n", " return datetime.datetime.utcnow().strftime('%Y.%m.%d')\n", "\n", "\n", "class SetupDevelop(develop):\n", " \"\"\"\n", " Setup the development environemnt with: `./setup.py develop`\n", " \"\"\"\n", "\n", " def finalize_options(self):\n", " if not os.getenv('VIRTUAL_ENV'):\n", " print('ERROR: You must be in a virtual environment', sys.stderr)\n", " sys.exit(1)\n", " develop.finalize_options(self)\n", "\n", " def run(self):\n", " develop.run(self)\n", "\n", " # Install the dev requirements\n", " print('>>> Install dev requirements')\n", " self.spawn('pip install --upgrade --requirement requirements/dev.txt'.split(' '))\n", " print('<<< Instell dev requirements')\n", "\n", "\n", "setup(\n", " name='btrsync',\n", " version=version(),\n", " description=\"Rsync + btrfs archiving utility.\",\n", " long_description=readme,\n", " author='zeroxoneb',\n", " author_email='zeroxoneb@gmail.com',\n", " url='https://github.com/zeroxoneb/btrsync',\n", " packages=[\n", " 'btrsync',\n", " ],\n", " package_dir={'btrsync':\n", " 'btrsync'},\n", " entry_points={\n", " 'console_scripts': [\n", " 'btrsync = btrsync.cli:sync',\n", " ]\n", " },\n", " include_package_data=True,\n", " install_requires=[\n", " 'click==7.0',\n", " ],\n", " license=\"MIT\",\n", " zip_safe=False,\n", " keywords='btrsync',\n", " classifiers=[\n", " 'Intended Audience :: Developers',\n", " 'License :: OSI Approved :: MIT License',\n", " 'Programming Language :: Python :: 3',\n", " ],\n", " cmdclass={\n", " 'develop': SetupDevelop\n", " }\n", ")\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
73
0.000152
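Two small details of the setup script above are worth spelling out: `version()` produces a calendar-style version from the current UTC date, and the single `console_scripts` entry point is what turns the package into a `btrsync` command. A quick sketch of both (the example date is made up; only the names from the script above are used):

# version() in the setup script returns the UTC date as a version string, e.g. "2024.05.17":
import datetime
print(datetime.datetime.utcnow().strftime('%Y.%m.%d'))

# The entry point 'btrsync = btrsync.cli:sync' means that after installation the
# generated `btrsync` executable is roughly equivalent to:
#   from btrsync.cli import sync
#   sync()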
from Adafruit_LED_Backpack import SevenSegment class Displays(): def __init__(self): # Change busnum to 0 if using a pi older than B+ self.blueDisplay = SevenSegment.SevenSegment(address=0x70, busnum = 1) self.redDisplay = SevenSegment.SevenSegment(address=0x71, busnum = 1) self.greenDisplay = SevenSegment.SevenSegment(address=0x72, busnum = 1) self.displayList = [self.blueDisplay, self.redDisplay, self.greenDisplay] # Needed to allow displays to work for display in self.displayList: display.begin() # Takes a set of 3 numbers and displays them in order: blue, red and green def displayTimes(self, timesList): for i, display in enumerate(self.displayList): display.clear() # Ensure that the the number fits on the displays # Adjust the digit place depending if the time is ten seconds or greater display.print_float(timesList[i] % 100, decimal_digits=(3 if timesList[i] < 10 else 2)) display.write_display() def clear(self): for display in self.displayList: display.clear() display.write_display() def displayHex(self, hexList): for i, display in enumerate(self.displayList): display.clear() display.print_hex(hexList[i]) display.write_display()
[ "from Adafruit_LED_Backpack import SevenSegment\n", "\n", "class Displays():\n", " def __init__(self):\n", " # Change busnum to 0 if using a pi older than B+\n", " self.blueDisplay = SevenSegment.SevenSegment(address=0x70, busnum = 1)\n", " self.redDisplay = SevenSegment.SevenSegment(address=0x71, busnum = 1)\n", " self.greenDisplay = SevenSegment.SevenSegment(address=0x72, busnum = 1)\n", "\n", " self.displayList = [self.blueDisplay, self.redDisplay, self.greenDisplay]\n", "\n", " # Needed to allow displays to work\n", " for display in self.displayList:\n", " display.begin()\n", "\n", " # Takes a set of 3 numbers and displays them in order: blue, red and green\n", " def displayTimes(self, timesList):\n", " for i, display in enumerate(self.displayList):\n", " display.clear()\n", "\n", " # Ensure that the the number fits on the displays\n", " # Adjust the digit place depending if the time is ten seconds or greater\n", "\n", " display.print_float(timesList[i] % 100, decimal_digits=(3 if timesList[i] < 10 else 2))\n", " display.write_display()\n", "\n", " def clear(self):\n", " for display in self.displayList:\n", " display.clear()\n", " display.write_display()\n", "\n", " def displayHex(self, hexList):\n", " for i, display in enumerate(self.displayList):\n", " display.clear()\n", " display.print_hex(hexList[i])\n", " display.write_display()\n" ]
[ 0, 0, 0.05555555555555555, 0, 0, 0.04938271604938271, 0.04938271604938271, 0.04938271604938271, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
36
0.006602
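A short usage sketch for the Displays class above, assuming the Adafruit_LED_Backpack library is installed, the three seven-segment backpacks are wired at the I2C addresses listed in __init__, and the class is saved in a hypothetical displays.py; the timer and hex values are made up for illustration:

# Hypothetical driver for the Displays class above (module name assumed).
from displays import Displays

panel = Displays()

# Show three lap times (in seconds) on the blue, red and green displays in order.
# Times of ten seconds or more get two decimal places, shorter times get three.
panel.displayTimes([9.482, 12.35, 47.9])

# Show a hex value on each display, then blank all three.
panel.displayHex([0xBEEF, 0xCAFE, 0xF00D])
panel.clear()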
#!/usr/bin/env python """ This file is part of open-ihm. open-ihm is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. open-ihm is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with open-ihm. If not, see <http://www.gnu.org/licenses/>. """ # imports from PyQt4 package from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4 import uic import operator # import packages Ui_HouseholdIncomeReport, base_class = uic.loadUiType("gui/designs/ui_report_householdincome.ui") from data.report_settingsmanager import ReportsSettingsManager from outputs.routines.report_householdsincome import HouseholdIncome from outputs.routines.report_householdsincome_write import HouseholdsIncomeWrite from outputs.routines.report_disposable_income import DisposableHouseholdIncome from mixins import MDIDialogMixin class HouseholdIncomeReport(QDialog, Ui_HouseholdIncomeReport, MDIDialogMixin): ''' Creates the Household Income Report by Source from. Uses the design class in gui.designs.ui_report_householdincome. ''' def __init__(self, parent): ''' Set up the dialog box interface ''' self.parent = parent QDialog.__init__(self) self.setupUi(self) self.parent = parent self.reporttype = self.cmbReportType.currentText() self.getProjectNames() self.putMainIncomeCategories() self.insertHouseholdsHeader() self.insertPCharsHeader() def updateDialogData(self): '''Update Income Sources list to those relevant for the current project''' self.putCropIncomeSources() self.getHouseholdCharacteristics() self.getPersonalCharacteristics() self.putHouseholdNames() self.putEmploymentIncomeSources() self.putLivestockIncomeSources() #self.putLoanSources() self.putTransferIncomeSources() self.putwildFoodIncomeSources() def getProjectNames(self): ''' populate projects combobox with available projects''' settingsmgr = ReportsSettingsManager() rows = settingsmgr.getProjectNames() for row in rows: project = row[0] self.cmbProjects.addItem(project) self.cmbProjects.setCurrentIndex(-1) def getselectedProject(self): ''' get name of project selected by user''' selectedproject = self.cmbProjects.currentText() return selectedproject def getProjectID(self): ''' get ID for the selected project''' selectedproject = self.getselectedProject() if selectedproject !="": settingsmgr = ReportsSettingsManager() selectedprojectid = settingsmgr.getSelectedProjectID(selectedproject) return selectedprojectid else: return 0 def putMainIncomeCategories(self): ''' Insert Income categories in the Income sources treeview''' categories = ['Crops','Employment','Livestock','Loans','Transfers','Wild Foods'] num = 0 model = QStandardItemModel() parent = QModelIndex() child = QModelIndex() parent = model.index( 0, 0 ) model.insertColumn(0, parent ) #one column for children model.insertRows( 0, 6, parent ) model.setHorizontalHeaderItem(0,QStandardItem('Income Sources')) for row in categories: child = model.index( num, 0, parent ) model.setData(child, row) num = num + 1 self.treeView.setModel(model) self.treeView.show() def getCropsIndex(self): '''Get index of Crops Category form the Dialog's TreeView''' cropsindex = self.treeView.model().index(0, 0) return 
cropsindex def putCropIncomeSources(self): '''Insert Crop Income Sources into the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getCropIncomeSources(projectid) numrows = len(rows) parent = self.getCropsIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getEmploymentIndex(self): '''Get index of Employment Category form the Dialog's TreeView''' parentindex = self.treeView.model().index(1, 0) return parentindex def putEmploymentIncomeSources(self): '''Insert Employment Income Sources into the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getEmploymentIncomeSources(projectid) numrows = len(rows) parent = self.getEmploymentIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getLivestockIndex(self): '''Get index of Livestock Category form the Dialog's TreeView''' parentindex = self.treeView.model().index(2, 0) return parentindex def putLivestockIncomeSources(self): '''Insert Livestock Income Sources into the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getLivestockIncomeSources(projectid) numrows = len(rows) parent = self.getLivestockIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getLoansIndex(self): '''Get index of Loans Category form the Dialog's TreeView''' parentindex = self.treeView.model().index(3, 0) return parentindex def putLoanSources(self): '''Insert Loan Income Sources into the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getLoanIncomeSources(projectid) numrows = len(rows) parent = self.getLoansIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getTransferIncomeIndex(self): '''Get index of Transfers Category form the Dialog's TreeView''' parentindex = self.treeView.model().index(4, 0) return parentindex def putTransferIncomeSources(self): '''Insert Transfer Income Sources into the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getTransferIncomeSources(projectid) numrows = len(rows) parent = self.getTransferIncomeIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getWildFoodsIncomeIndex(self): '''Get index of Wild Foods Category form the Dialog's TreeView''' parentindex = self.treeView.model().index(5, 0) return parentindex def putwildFoodIncomeSources(self): '''Insert Wild Food Income Sources into 
the Household Income Dialog's TreeView''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getWildfoodsIncomeSources(projectid) numrows = len(rows) parent = self.getWildFoodsIncomeIndex() self.treeView.model().insertColumn(0, parent ) self.treeView.model().insertRows( 0, numrows, parent ) num =0 for row in rows: child = self.treeView.model().index( num, 0, parent ) self.treeView.model().setData(child, row[0]) num = num + 1 def getHouseholdNames(self): '''Get Names of Households selected by the User on the Interface''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getProjectHouseholds(projectid) return rows def insertHouseholdsHeader(self): '''Insert Title for treeViewHouseholds''' model = QStandardItemModel() model.setHorizontalHeaderItem(0,QStandardItem('Select Household Names')) self.treeViewHouseholds.setModel(model) self.treeViewHouseholds.show() def insertPCharsHeader(self): '''Insert Title for listViewHCharacteristics''' model = QStandardItemModel() model.setHorizontalHeaderItem(0,QStandardItem('Personal Characteristics')) self.listViewHCharacteristics.setModel(model) self.listViewHCharacteristics.show() def putHouseholdNames(self): ''' Insert household names for the selected Project''' hholdnames = self.getHouseholdNames() model = QStandardItemModel() parent = QModelIndex() name = 'All Households' numberofrows = len(hholdnames) model.insertRow(0,parent ) model.insertColumn(0, parent ) #one column for children parent = model.index( 0, 0 ) model.setData( parent, name ) #Insert project-specific household names as childred of the node 'All Households' parent = model.index(0, 0, QModelIndex()) model.insertColumn(0, parent ) model.insertRows( 0, numberofrows, parent ) num = 0 for row in hholdnames: child = model.index( num, 0, parent ) model.setData(child, row[0]) num = num + 1 model.setHorizontalHeaderItem(0,QStandardItem('Select Household Names')) self.treeViewHouseholds.setModel(model) self.treeViewHouseholds.show() def getHouseholdCharacteristics(self): ''' get household characteristics relevant to selected project''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getHouseholdCharacteristics(projectid) model = QStandardItemModel() num = 0 for row in rows: qtHCharacteristic = QStandardItem( "%s" % row[0]) qtHCharacteristic.setTextAlignment( Qt.AlignLeft ) if ((qtHCharacteristic.text() != 'hhid')and(qtHCharacteristic.text() != 'pid') ): model.setItem( num, 0, qtHCharacteristic ) num = num + 1 self.listViewHCharacteristics.setModel(model) self.listViewHCharacteristics.show() def getPersonalCharacteristics(self): ''' get personal characteristics relevant to the selected project''' projectid = self.getProjectID() settingsmgr = ReportsSettingsManager() rows = settingsmgr.getPersonalCharacteristics(projectid) model = QStandardItemModel() num = 0 for row in rows: qtPCharacteristic = QStandardItem( "%s" % row[0]) qtPCharacteristic.setTextAlignment( Qt.AlignLeft ) if ((qtPCharacteristic.text() != 'hhid')and(qtPCharacteristic.text() != 'personid') and (qtPCharacteristic.text() != 'pid')): model.setItem( num, 0, qtPCharacteristic ) num = num + 1 self.listViewPersonalCharacteristics.setModel(model) self.listViewPersonalCharacteristics.show() def getSelectedHouseholdCharacteristics(self): ''' get list of user selected household characteristics as part of the criteria for report generation''' selectedHChars = [] selectedIndexes = self.getSelectedHIndexes() for indexVal in 
selectedIndexes: currentitem = self.listViewHCharacteristics.model().item(indexVal.row(),0).text() if currentitem not in selectedHChars: selectedHChars.append(str(currentitem)) return selectedHChars def getSelectedHIndexes(self): return self.listViewHCharacteristics.selectedIndexes() def getSelectedPersonalCharacteristics(self): ''' get list of user selected householdpersonal characteristics as part of the criteria for report generation''' selectedRows = [] selectedIndexes = self.getSelectedPIndexes() for indexVal in selectedIndexes: currentitem = self.listViewPersonalCharacteristics.model().item(indexVal.row(),0).text() if currentitem not in selectedRows: selectedRows.append(str(currentitem)) return selectedRows def getSelectedPIndexes(self): '''Get indexes of selected Personal characteristics''' return self.listViewPersonalCharacteristics.selectedIndexes() def getSelectedHouseholdsIndexes(self): '''Get indexes of selected Household characteristics''' return self.treeViewHouseholds.selectedIndexes() def getReportHouseholdIDs (self): '''Get a list of households that match a users selection criteria -i.e Household names + Personal Characteristics and Household Characteristics''' selectedids = [] householdIDsQuery =self.getHouseholdIDsQuery() connector = HouseholdIncome() householdIDs = connector.getReportHouseholdIDs(householdIDsQuery) for hid in householdIDs: selectedids.append(str(hid[0])) return selectedids def getHouseholdIDsQuery(self): '''Get query for generating a list of households that match a users selection criteria''' projectid = self.getProjectID() selectedHChars = self.getSelectedHouseholdCharacteristics() selectedPChars = self.getSelectedPersonalCharacteristics() selectedhouseholds = self.getHouseholdsSelection() connector = HouseholdIncome() householdIDsQuery = connector.buildReportHouseholdIDsQuery(projectid,selectedhouseholds,selectedPChars,selectedHChars) return householdIDsQuery def getHouseholdsSelection(self): '''Get names of households selected by the user for charting''' selectedIndexes = self.getSelectedHouseholdsIndexes() parentIndex = self.treeViewHouseholds.model().index(0, 0, QModelIndex()) hholdnames = [] if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: houses = self.getHouseholdNames() for house in houses: hholdnames.append(str(house[0])) else: for indexVal in selectedIndexes: currentitem = self.treeViewHouseholds.model().data(indexVal, Qt.DisplayRole).toString() hholdnames.append(str(currentitem)) else: QMessageBox.information(self,"Households By Income Source","No Households Selected") return hholdnames def getCropReportDetails(self): '''Get list of crops selected by the user for charting''' householdIDs = self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getCropsIndex() selectedIndexes = self.getSelectedCropCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedCropCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() cropsroot = self.getCropsIndex() cropincomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == cropsroot) or (indexVal.parent() == cropsroot): if indexVal not in cropincomeIndexes: cropincomeIndexes.append(indexVal) return cropincomeIndexes def getEmploymentReportDetails(self): householdIDs = 
self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getEmploymentIndex() selectedIndexes = self.getSelectedEmploymentCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedEmploymentCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() root = self.getEmploymentIndex() incomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == root) or (indexVal.parent() == root): if indexVal not in incomeIndexes: incomeIndexes.append(indexVal) return incomeIndexes def getLivestockReportDetails(self): householdIDs = self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getLivestockIndex() selectedIndexes = self.getSelectedLivestockCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedLivestockCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() root = self.getLivestockIndex() incomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == root) or (indexVal.parent() == root): if indexVal not in incomeIndexes: incomeIndexes.append(indexVal) return incomeIndexes def getLoansReportDetails(self): householdIDs = self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getLoansIndex() selectedIndexes = self.getSelectedLoansCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedLoansCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() root = self.getLoansIndex() incomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == root) or (indexVal.parent() == root): if indexVal not in incomeIndexes: incomeIndexes.append(indexVal) return incomeIndexes def getTransfersDetails(self): householdIDs = self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getTransferIncomeIndex() selectedIndexes = self.getSelectedTransfersCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedTransfersCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() root = self.getTransferIncomeIndex() incomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == root) or (indexVal.parent() == root): if indexVal not in incomeIndexes: incomeIndexes.append(indexVal) return incomeIndexes def getWildFoodDetails(self): householdIDs = self.getReportHouseholdIDs() requiredDetailType =[] if len(householdIDs)!=0: parentIndex = self.getWildFoodsIncomeIndex() selectedIndexes = self.getSelectedWildFoodsCriteria() if len(selectedIndexes) != 0: if parentIndex in selectedIndexes: 
requiredDetailType.append('All') else: for indexVal in selectedIndexes: currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString() requiredDetailType.append(str(currentitem)) return requiredDetailType def getSelectedWildFoodsCriteria(self): selectedIncomeIndexes = self.treeView.selectedIndexes() root = self.getWildFoodsIncomeIndex() incomeIndexes = [] for indexVal in selectedIncomeIndexes: if (indexVal == root) or (indexVal.parent() == root): if indexVal not in incomeIndexes: incomeIndexes.append(indexVal) return incomeIndexes def getReportTable (self): pid = self.getProjectID() reporttype = self.setReportType() reportQuery =self.getFinalReportTableQuery() connector = HouseholdIncome() reportTable = connector.getReportTable(reportQuery,pid,reporttype) return reportTable def getFinalReportTableQuery(self): projectid = self.getProjectID() householdIDs = self.getDISortedHouseholdIDs() cropdetails = self.getCropReportDetails() employmentdetails = self.getEmploymentReportDetails() livestockdetails = self.getLivestockReportDetails() loandetails = self.getLoansReportDetails() transferdetails = self.getTransfersDetails() wildfoodsdetails = self.getWildFoodDetails() reporttype = self.setReportType() connector = HouseholdIncome() householdIDsQuery = connector.getFinalIncomeReportTableQuery(reporttype,projectid,householdIDs,cropdetails,employmentdetails, livestockdetails,loandetails,transferdetails,wildfoodsdetails ) return householdIDsQuery def writeTable(self): reporttable= self.getReportTable() writer = HouseholdsIncomeWrite() reporttype = self.setReportType() writer.writeSpreadsheetReport(reporttable,reporttype) def setReportType(self): reporttype = self.cmbReportType.currentText() return reporttype def getDISortedHouseholdIDs(self): """ Returns list of household IDs ordered according to DI/AE, from poorest to richest""" householdIDs = self.getReportHouseholdIDs() pid = self.getProjectID() reporttype = 'DI/AE' connector = DisposableHouseholdIncome() disposableincome = connector.householdDisposableIncome(reporttype,pid,householdIDs) houseids = [] for row in disposableincome: houseids.append(row[0]) return houseids
[ "#!/usr/bin/env python\r\n", "\r\n", "\"\"\"\r\n", "This file is part of open-ihm.\r\n", "\r\n", "open-ihm is free software: you can redistribute it and/or modify it\r\n", "under the terms of the GNU General Public License as published by the\r\n", "Free Software Foundation, either version 3 of the License, or (at your\r\n", "option) any later version.\r\n", "\r\n", "open-ihm is distributed in the hope that it will be useful, but\r\n", "WITHOUT ANY WARRANTY; without even the implied warranty of\r\n", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n", "General Public License for more details.\r\n", "\r\n", "You should have received a copy of the GNU General Public License\r\n", "along with open-ihm. If not, see <http://www.gnu.org/licenses/>.\r\n", "\"\"\"\r\n", "\r\n", "\r\n", "# imports from PyQt4 package\r\n", "from PyQt4.QtCore import *\r\n", "from PyQt4.QtGui import *\r\n", "from PyQt4 import uic\r\n", "import operator\r\n", "\r\n", "# import packages\r\n", "Ui_HouseholdIncomeReport, base_class = uic.loadUiType(\"gui/designs/ui_report_householdincome.ui\")\r\n", "\r\n", "from data.report_settingsmanager import ReportsSettingsManager\r\n", "from outputs.routines.report_householdsincome import HouseholdIncome\r\n", "from outputs.routines.report_householdsincome_write import HouseholdsIncomeWrite\r\n", "from outputs.routines.report_disposable_income import DisposableHouseholdIncome\r\n", "\r\n", "from mixins import MDIDialogMixin\r\n", "\r\n", "class HouseholdIncomeReport(QDialog, Ui_HouseholdIncomeReport, MDIDialogMixin):\r\n", " ''' Creates the Household Income Report by Source from. Uses the design class\r\n", "\t\tin gui.designs.ui_report_householdincome. '''\t\r\n", "\t\r\n", " def __init__(self, parent):\r\n", " \r\n", "\t''' Set up the dialog box interface '''\r\n", "\tself.parent = parent\r\n", " QDialog.__init__(self)\r\n", " \tself.setupUi(self)\r\n", " self.parent = parent\r\n", " self.reporttype = self.cmbReportType.currentText()\r\n", "\r\n", " self.getProjectNames()\r\n", " self.putMainIncomeCategories()\r\n", " self.insertHouseholdsHeader()\r\n", " self.insertPCharsHeader()\r\n", " \r\n", " def updateDialogData(self):\r\n", " '''Update Income Sources list to those relevant for the current project'''\r\n", " self.putCropIncomeSources()\r\n", " self.getHouseholdCharacteristics()\r\n", " self.getPersonalCharacteristics()\r\n", " self.putHouseholdNames()\r\n", " self.putEmploymentIncomeSources()\r\n", " self.putLivestockIncomeSources()\r\n", " #self.putLoanSources()\r\n", " self.putTransferIncomeSources()\r\n", " self.putwildFoodIncomeSources()\r\n", "\r\n", " def getProjectNames(self):\r\n", " ''' populate projects combobox with available projects'''\r\n", " \r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getProjectNames()\r\n", "\r\n", " for row in rows:\r\n", "\t\tproject = row[0]\r\n", " \tself.cmbProjects.addItem(project)\r\n", "\r\n", " self.cmbProjects.setCurrentIndex(-1)\r\n", "\r\n", " def getselectedProject(self):\r\n", " ''' get name of project selected by user'''\r\n", " \r\n", " selectedproject = self.cmbProjects.currentText()\r\n", " return selectedproject\r\n", " \r\n", " def getProjectID(self):\r\n", "\r\n", " ''' get ID for the selected project'''\r\n", " \r\n", " selectedproject = self.getselectedProject()\r\n", " if selectedproject !=\"\":\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " selectedprojectid = settingsmgr.getSelectedProjectID(selectedproject)\r\n", " return selectedprojectid\r\n", " else: 
return 0\r\n", "\r\n", "\r\n", " def putMainIncomeCategories(self):\r\n", " ''' Insert Income categories in the Income sources treeview'''\r\n", "\r\n", " categories = ['Crops','Employment','Livestock','Loans','Transfers','Wild Foods']\r\n", " num = 0\r\n", " model = QStandardItemModel()\r\n", " parent = QModelIndex()\r\n", " child = QModelIndex()\r\n", "\r\n", " parent = model.index( 0, 0 )\r\n", " model.insertColumn(0, parent ) #one column for children\r\n", " model.insertRows( 0, 6, parent )\r\n", " model.setHorizontalHeaderItem(0,QStandardItem('Income Sources'))\r\n", " \r\n", " for row in categories:\r\n", " child = model.index( num, 0, parent )\r\n", " model.setData(child, row)\r\n", " num = num + 1\r\n", " \t\t\r\n", " self.treeView.setModel(model)\r\n", " self.treeView.show()\r\n", "\r\n", " def getCropsIndex(self):\r\n", " '''Get index of Crops Category form the Dialog's TreeView'''\r\n", " cropsindex = self.treeView.model().index(0, 0)\r\n", " return cropsindex\r\n", " \r\n", " def putCropIncomeSources(self):\r\n", " '''Insert Crop Income Sources into the Household Income Dialog's TreeView'''\r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getCropIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getCropsIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getEmploymentIndex(self):\r\n", " '''Get index of Employment Category form the Dialog's TreeView'''\r\n", " parentindex = self.treeView.model().index(1, 0)\r\n", " return parentindex\r\n", "\r\n", " def putEmploymentIncomeSources(self):\r\n", " '''Insert Employment Income Sources into the Household Income Dialog's TreeView'''\r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getEmploymentIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getEmploymentIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getLivestockIndex(self):\r\n", " '''Get index of Livestock Category form the Dialog's TreeView'''\r\n", " parentindex = self.treeView.model().index(2, 0)\r\n", " return parentindex\r\n", "\r\n", "\r\n", " def putLivestockIncomeSources(self):\r\n", " '''Insert Livestock Income Sources into the Household Income Dialog's TreeView'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getLivestockIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getLivestockIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getLoansIndex(self):\r\n", " '''Get index of Loans Category form the Dialog's 
TreeView'''\r\n", " parentindex = self.treeView.model().index(3, 0)\r\n", " return parentindex\r\n", "\r\n", " def putLoanSources(self):\r\n", " '''Insert Loan Income Sources into the Household Income Dialog's TreeView'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getLoanIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getLoansIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getTransferIncomeIndex(self):\r\n", " '''Get index of Transfers Category form the Dialog's TreeView'''\r\n", " parentindex = self.treeView.model().index(4, 0)\r\n", " return parentindex\r\n", "\r\n", " def putTransferIncomeSources(self):\r\n", " '''Insert Transfer Income Sources into the Household Income Dialog's TreeView'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getTransferIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getTransferIncomeIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getWildFoodsIncomeIndex(self):\r\n", " '''Get index of Wild Foods Category form the Dialog's TreeView'''\r\n", " \r\n", " parentindex = self.treeView.model().index(5, 0)\r\n", " return parentindex\r\n", "\r\n", " def putwildFoodIncomeSources(self):\r\n", " '''Insert Wild Food Income Sources into the Household Income Dialog's TreeView'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getWildfoodsIncomeSources(projectid)\r\n", " numrows = len(rows)\r\n", "\r\n", " parent = self.getWildFoodsIncomeIndex()\r\n", " self.treeView.model().insertColumn(0, parent )\r\n", " self.treeView.model().insertRows( 0, numrows, parent )\r\n", "\r\n", " num =0\r\n", " \r\n", " for row in rows:\r\n", " child = self.treeView.model().index( num, 0, parent )\r\n", " self.treeView.model().setData(child, row[0])\r\n", " num = num + 1\r\n", "\r\n", " def getHouseholdNames(self):\r\n", " '''Get Names of Households selected by the User on the Interface'''\r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getProjectHouseholds(projectid)\r\n", " return rows\r\n", "\r\n", " def insertHouseholdsHeader(self):\r\n", " '''Insert Title for treeViewHouseholds'''\r\n", " model = QStandardItemModel()\r\n", " model.setHorizontalHeaderItem(0,QStandardItem('Select Household Names'))\r\n", " self.treeViewHouseholds.setModel(model)\r\n", " self.treeViewHouseholds.show()\t\r\n", "\r\n", " def insertPCharsHeader(self):\r\n", " '''Insert Title for listViewHCharacteristics'''\r\n", " \r\n", " model = QStandardItemModel()\r\n", " model.setHorizontalHeaderItem(0,QStandardItem('Personal Characteristics'))\r\n", " self.listViewHCharacteristics.setModel(model)\r\n", " self.listViewHCharacteristics.show()\t\r\n", "\r\n", "\r\n", " def 
putHouseholdNames(self):\r\n", " ''' Insert household names for the selected Project'''\r\n", "\r\n", " hholdnames = self.getHouseholdNames()\r\n", " model = QStandardItemModel()\r\n", " parent = QModelIndex()\r\n", " name = 'All Households'\r\n", " numberofrows = len(hholdnames)\r\n", "\r\n", " model.insertRow(0,parent )\r\n", " model.insertColumn(0, parent ) #one column for children\r\n", " parent = model.index( 0, 0 )\r\n", " model.setData( parent, name )\r\n", "\r\n", " #Insert project-specific household names as childred of the node 'All Households'\r\n", " parent = model.index(0, 0, QModelIndex())\r\n", " model.insertColumn(0, parent )\r\n", " model.insertRows( 0, numberofrows, parent )\r\n", " num = 0\r\n", " for row in hholdnames:\r\n", " \r\n", " child = model.index( num, 0, parent )\r\n", " model.setData(child, row[0])\r\n", " num = num + 1\r\n", " \t\t\r\n", " model.setHorizontalHeaderItem(0,QStandardItem('Select Household Names'))\r\n", " self.treeViewHouseholds.setModel(model)\r\n", " self.treeViewHouseholds.show()\t\r\n", "\r\n", " def getHouseholdCharacteristics(self):\r\n", " \r\n", " ''' get household characteristics relevant to selected project'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getHouseholdCharacteristics(projectid)\r\n", " model = QStandardItemModel()\r\n", "\tnum = 0\r\n", "\r\n", " for row in rows:\r\n", " \r\n", " qtHCharacteristic = QStandardItem( \"%s\" % row[0])\r\n", " qtHCharacteristic.setTextAlignment( Qt.AlignLeft )\r\n", " if ((qtHCharacteristic.text() != 'hhid')and(qtHCharacteristic.text() != 'pid') ):\r\n", " \r\n", " model.setItem( num, 0, qtHCharacteristic )\r\n", " num = num + 1\r\n", " \t\t\r\n", " self.listViewHCharacteristics.setModel(model)\r\n", " self.listViewHCharacteristics.show()\t\r\n", "\r\n", " def getPersonalCharacteristics(self):\r\n", " ''' get personal characteristics relevant to the selected project'''\r\n", " \r\n", " projectid = self.getProjectID()\r\n", " settingsmgr = ReportsSettingsManager()\r\n", " rows = settingsmgr.getPersonalCharacteristics(projectid)\r\n", " model = QStandardItemModel()\r\n", "\tnum = 0\r\n", " for row in rows:\r\n", " qtPCharacteristic = QStandardItem( \"%s\" % row[0])\r\n", " qtPCharacteristic.setTextAlignment( Qt.AlignLeft )\r\n", " \r\n", " if ((qtPCharacteristic.text() != 'hhid')and(qtPCharacteristic.text() != 'personid') and (qtPCharacteristic.text() != 'pid')):\r\n", " model.setItem( num, 0, qtPCharacteristic )\r\n", " num = num + 1\r\n", " \t\t\r\n", " self.listViewPersonalCharacteristics.setModel(model)\r\n", " self.listViewPersonalCharacteristics.show()\t\r\n", "\r\n", " def getSelectedHouseholdCharacteristics(self):\r\n", " ''' get list of user selected household characteristics as part of the criteria for report generation'''\r\n", "\t\t\r\n", "\tselectedHChars = []\r\n", "\tselectedIndexes = self.getSelectedHIndexes()\r\n", "\t\t\r\n", "\tfor indexVal in selectedIndexes:\r\n", " currentitem = self.listViewHCharacteristics.model().item(indexVal.row(),0).text()\r\n", "\t if currentitem not in selectedHChars:\r\n", "\t\tselectedHChars.append(str(currentitem))\r\n", "\treturn selectedHChars\r\n", "\r\n", " def getSelectedHIndexes(self):\r\n", " return self.listViewHCharacteristics.selectedIndexes()\r\n", "\r\n", " def getSelectedPersonalCharacteristics(self):\r\n", " ''' get list of user selected householdpersonal characteristics as part of the criteria for report generation'''\r\n", "\t\t\r\n", "\tselectedRows = 
[]\r\n", "\tselectedIndexes = self.getSelectedPIndexes()\r\n", "\t\t\r\n", "\tfor indexVal in selectedIndexes:\r\n", " currentitem = self.listViewPersonalCharacteristics.model().item(indexVal.row(),0).text()\r\n", "\t if currentitem not in selectedRows:\r\n", "\t\tselectedRows.append(str(currentitem))\r\n", "\treturn selectedRows\r\n", "\r\n", " def getSelectedPIndexes(self):\r\n", " '''Get indexes of selected Personal characteristics'''\r\n", " return self.listViewPersonalCharacteristics.selectedIndexes()\r\n", "\r\n", " def getSelectedHouseholdsIndexes(self):\r\n", " '''Get indexes of selected Household characteristics'''\r\n", " return self.treeViewHouseholds.selectedIndexes()\r\n", " \r\n", " def getReportHouseholdIDs (self):\r\n", " '''Get a list of households that match a users selection criteria -i.e Household names + Personal Characteristics and Household Characteristics'''\r\n", " \r\n", " selectedids = []\r\n", " householdIDsQuery =self.getHouseholdIDsQuery()\r\n", " connector = HouseholdIncome()\r\n", " householdIDs = connector.getReportHouseholdIDs(householdIDsQuery)\r\n", " for hid in householdIDs:\r\n", " selectedids.append(str(hid[0]))\r\n", " return selectedids\r\n", "\r\n", " def getHouseholdIDsQuery(self):\r\n", " '''Get query for generating a list of households that match a users selection criteria'''\r\n", "\r\n", " projectid = self.getProjectID()\r\n", " selectedHChars = self.getSelectedHouseholdCharacteristics()\r\n", " selectedPChars = self.getSelectedPersonalCharacteristics()\r\n", " selectedhouseholds = self.getHouseholdsSelection()\r\n", " connector = HouseholdIncome()\r\n", " householdIDsQuery = connector.buildReportHouseholdIDsQuery(projectid,selectedhouseholds,selectedPChars,selectedHChars)\r\n", " return householdIDsQuery\r\n", "\r\n", " def getHouseholdsSelection(self):\r\n", " '''Get names of households selected by the user for charting'''\r\n", " \r\n", " selectedIndexes = self.getSelectedHouseholdsIndexes()\r\n", " parentIndex = self.treeViewHouseholds.model().index(0, 0, QModelIndex())\r\n", " hholdnames = []\r\n", " \r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " houses = self.getHouseholdNames()\r\n", " for house in houses:\r\n", " hholdnames.append(str(house[0]))\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeViewHouseholds.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " hholdnames.append(str(currentitem))\r\n", " else:\r\n", " QMessageBox.information(self,\"Households By Income Source\",\"No Households Selected\")\r\n", " return hholdnames\r\n", "\r\n", " def getCropReportDetails(self):\r\n", " '''Get list of crops selected by the user for charting'''\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getCropsIndex()\r\n", " selectedIndexes = self.getSelectedCropCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedCropCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " cropsroot = self.getCropsIndex()\r\n", " cropincomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if 
(indexVal == cropsroot) or (indexVal.parent() == cropsroot):\r\n", " if indexVal not in cropincomeIndexes:\r\n", " cropincomeIndexes.append(indexVal)\r\n", " return cropincomeIndexes\r\n", "\r\n", " def getEmploymentReportDetails(self):\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getEmploymentIndex()\r\n", " selectedIndexes = self.getSelectedEmploymentCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedEmploymentCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " root = self.getEmploymentIndex()\r\n", " incomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if (indexVal == root) or (indexVal.parent() == root):\r\n", " if indexVal not in incomeIndexes:\r\n", " incomeIndexes.append(indexVal)\r\n", " return incomeIndexes\r\n", "\r\n", " def getLivestockReportDetails(self):\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getLivestockIndex()\r\n", " selectedIndexes = self.getSelectedLivestockCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedLivestockCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " root = self.getLivestockIndex()\r\n", " incomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if (indexVal == root) or (indexVal.parent() == root):\r\n", " if indexVal not in incomeIndexes:\r\n", " incomeIndexes.append(indexVal)\r\n", " return incomeIndexes\r\n", "\r\n", " def getLoansReportDetails(self):\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getLoansIndex()\r\n", " selectedIndexes = self.getSelectedLoansCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedLoansCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " root = self.getLoansIndex()\r\n", " incomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if (indexVal == root) or (indexVal.parent() == root):\r\n", " if indexVal not in incomeIndexes:\r\n", " incomeIndexes.append(indexVal)\r\n", " return incomeIndexes\r\n", "\r\n", " def getTransfersDetails(self):\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getTransferIncomeIndex()\r\n", " selectedIndexes = 
self.getSelectedTransfersCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedTransfersCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " root = self.getTransferIncomeIndex()\r\n", " incomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if (indexVal == root) or (indexVal.parent() == root):\r\n", " if indexVal not in incomeIndexes:\r\n", " incomeIndexes.append(indexVal)\r\n", " return incomeIndexes\r\n", "\r\n", " def getWildFoodDetails(self):\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " requiredDetailType =[]\r\n", " \r\n", " if len(householdIDs)!=0:\r\n", " parentIndex = self.getWildFoodsIncomeIndex()\r\n", " selectedIndexes = self.getSelectedWildFoodsCriteria()\r\n", " if len(selectedIndexes) != 0:\r\n", " if parentIndex in selectedIndexes:\r\n", " requiredDetailType.append('All')\r\n", " else:\r\n", " for indexVal in selectedIndexes:\r\n", " currentitem = self.treeView.model().data(indexVal, Qt.DisplayRole).toString()\r\n", " requiredDetailType.append(str(currentitem))\r\n", " return requiredDetailType\r\n", " \r\n", " def getSelectedWildFoodsCriteria(self):\r\n", " selectedIncomeIndexes = self.treeView.selectedIndexes()\r\n", " root = self.getWildFoodsIncomeIndex()\r\n", " incomeIndexes = []\r\n", " for indexVal in selectedIncomeIndexes:\r\n", " if (indexVal == root) or (indexVal.parent() == root):\r\n", " if indexVal not in incomeIndexes:\r\n", " incomeIndexes.append(indexVal)\r\n", " return incomeIndexes\r\n", "\r\n", " def getReportTable (self):\r\n", "\r\n", " pid = self.getProjectID()\r\n", " reporttype = self.setReportType()\r\n", " reportQuery =self.getFinalReportTableQuery()\r\n", " connector = HouseholdIncome()\r\n", " reportTable = connector.getReportTable(reportQuery,pid,reporttype)\r\n", " return reportTable\r\n", "\r\n", " def getFinalReportTableQuery(self):\r\n", "\r\n", " projectid = self.getProjectID()\r\n", " householdIDs = self.getDISortedHouseholdIDs()\r\n", " cropdetails = self.getCropReportDetails()\r\n", " employmentdetails = self.getEmploymentReportDetails()\r\n", " livestockdetails = self.getLivestockReportDetails()\r\n", " loandetails = self.getLoansReportDetails()\r\n", " transferdetails = self.getTransfersDetails()\r\n", " wildfoodsdetails = self.getWildFoodDetails()\r\n", " reporttype = self.setReportType()\r\n", " \r\n", " connector = HouseholdIncome()\r\n", " householdIDsQuery = connector.getFinalIncomeReportTableQuery(reporttype,projectid,householdIDs,cropdetails,employmentdetails, livestockdetails,loandetails,transferdetails,wildfoodsdetails )\r\n", " return householdIDsQuery\r\n", "\r\n", " def writeTable(self):\r\n", " reporttable= self.getReportTable()\r\n", " writer = HouseholdsIncomeWrite()\r\n", " reporttype = self.setReportType()\r\n", " writer.writeSpreadsheetReport(reporttable,reporttype)\r\n", "\r\n", " def setReportType(self):\r\n", " reporttype = self.cmbReportType.currentText()\r\n", " return reporttype\r\n", " \r\n", " def getDISortedHouseholdIDs(self):\r\n", " \"\"\" Returns list of household IDs ordered according to DI/AE, from poorest to richest\"\"\"\r\n", " householdIDs = self.getReportHouseholdIDs()\r\n", " pid = 
self.getProjectID()\r\n", " reporttype = 'DI/AE'\r\n", " connector = DisposableHouseholdIncome()\r\n", " disposableincome = connector.householdDisposableIncome(reporttype,pid,householdIDs)\r\n", " houseids = []\r\n", " for row in disposableincome:\r\n", " houseids.append(row[0])\r\n", " return houseids\r\n", " \r\n", " \r\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0.015625, 0.014285714285714285, 0.024390243902439025, 0.012345679012345678, 0, 0.02857142857142857, 0, 0.012345679012345678, 0.012048192771084338, 0.06, 0.6666666666666666, 0.030303030303030304, 0.1, 0.047619047619047616, 0.043478260869565216, 0.03125, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0.1, 0.041666666666666664, 0, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0.05555555555555555, 0, 0, 0, 0.05555555555555555, 0, 0.029411764705882353, 0.017857142857142856, 0.011494252873563218, 0, 0.041666666666666664, 0, 0, 0.025, 0, 0, 0.06666666666666667, 0, 0, 0, 0, 0, 0.05263157894736842, 0.046153846153846156, 0.047619047619047616, 0.013513513513513514, 0.1, 0, 0.0392156862745098, 0, 0, 0.10714285714285714, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0.01098901098901099, 0.1, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0.1, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0.1, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.01098901098901099, 0.1, 0, 0, 0, 0, 0, 0, 0.017857142857142856, 0.03125, 0, 0.0625, 0.1, 0, 0.029850746268656716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0.024390243902439025, 0, 0, 0, 0.1, 0, 0.023809523809523808, 0, 0.02127659574468085, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555, 0.046153846153846156, 0.05263157894736842, 0.05128205128205128, 0, 0.02197802197802198, 0, 0.025, 0.03773584905660377, 0, 0, 0.3333333333333333, 0.0392156862745098, 0, 0, 0.10714285714285714, 0.024390243902439025, 0, 0.024390243902439025, 0, 0, 0.1, 0, 0.05555555555555555, 0, 0, 0, 0, 0.2, 0, 0.038461538461538464, 0.07142857142857142, 0.015873015873015872, 0.03125, 0.031578947368421054, 0.05555555555555555, 0.03333333333333333, 0, 0.10714285714285714, 0, 0.02127659574468085, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0.2, 0.038461538461538464, 0.015873015873015872, 0.03125, 0.038461538461538464, 0.014388489208633094, 0.03333333333333333, 0, 0.10714285714285714, 0, 0.018518518518518517, 0, 0, 0.008771929824561403, 0.75, 0.045454545454545456, 0.02127659574468085, 0.5, 0.02857142857142857, 0.031578947368421054, 0.045454545454545456, 0.023255813953488372, 0.041666666666666664, 0, 0.027777777777777776, 0, 0, 0, 0.00819672131147541, 0.75, 0.05, 0.02127659574468085, 0.5, 0.02857142857142857, 0.029411764705882353, 0.047619047619047616, 0.024390243902439025, 0.045454545454545456, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0.1, 0.02564102564102564, 0.00641025641025641, 0.1, 0, 0.017857142857142856, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0.1, 0, 0.012195121951219513, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0.030612244897959183, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.1, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0.018518518518518517, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.04522613065326633, 0, 0, 0, 0.022727272727272728, 0, 0, 0.015873015873015872, 0, 0, 0, 0, 0.16666666666666666, 0, 0.01020408163265306, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0, 0, 0.1, 0.09090909090909091 ]
637
0.019985
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import base64 import json import os import pipes import random import re import stat import tempfile import time from abc import ABCMeta, abstractmethod from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.executor.module_common import modify_module from ansible.parsing.utils.jsonify import jsonify from ansible.utils.unicode import to_bytes, to_unicode try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionBase(with_metaclass(ABCMeta, object)): ''' This class is the base class for all action plugins, and defines code common to all actions. The base class handles the connection by putting/getting files and executing commands based on the current action in use. ''' def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj): self._task = task self._connection = connection self._play_context = play_context self._loader = loader self._templar = templar self._shared_loader_obj = shared_loader_obj # Backwards compat: self._display isn't really needed, just import the global display and use that. self._display = display self._supports_check_mode = True @abstractmethod def run(self, tmp=None, task_vars=None): """ Action Plugins should implement this method to perform their tasks. Everything else in this base class is a helper method for the action plugin to do that. :kwarg tmp: Temporary directory. Sometimes an action plugin sets up a temporary directory and then calls another module. This parameter allows us to reuse the same directory for both. :kwarg task_vars: The variables (host vars, group vars, config vars, etc) associated with this task. :returns: dictionary of results from the module Implementors of action modules may find the following variables especially useful: * Module parameters. These are stored in self._task.args """ # store the module invocation details into the results results = {} if self._task.async == 0: results['invocation'] = dict( module_name = self._task.action, module_args = self._task.args, ) return results def _remote_file_exists(self, path): cmd = self._connection._shell.exists(path) result = self._low_level_execute_command(cmd=cmd, sudoable=True) if result['rc'] == 0: return True return False def _configure_module(self, module_name, module_args, task_vars=None): ''' Handles the loading and templating of the module code through the modify_module() function. ''' if task_vars is None: task_vars = dict() # Search module path(s) for named module. 
for mod_type in self._connection.module_implementation_preferences: # Check to determine if PowerShell modules are supported, and apply # some fixes (hacks) to module name + args. if mod_type == '.ps1': # win_stat, win_file, and win_copy are not just like their # python counterparts but they are compatible enough for our # internal usage if module_name in ('stat', 'file', 'copy') and self._task.action != module_name: module_name = 'win_%s' % module_name # Remove extra quotes surrounding path parameters before sending to module. if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'): for key in ('src', 'dest', 'path'): if key in module_args: module_args[key] = self._connection._shell._unquote(module_args[key]) module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type) if module_path: break else: # Use Windows version of ping module to check module paths when # using a connection that supports .ps1 suffixes. We check specifically # for win_ping here, otherwise the code would look for ping.ps1 if '.ps1' in self._connection.module_implementation_preferences: ping_module = 'win_ping' else: ping_module = 'ping' module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences) if module_path2 is not None: raise AnsibleError("The module %s was not found in configured module paths" % (module_name)) else: raise AnsibleError("The module %s was not found in configured module paths. " "Additionally, core modules are missing. If this is a checkout, " "run 'git submodule update --init --recursive' to correct this problem." % (module_name)) # insert shared code and arguments into the module (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars) return (module_style, module_shebang, module_data) def _compute_environment_string(self): ''' Builds the environment string to be used when executing the remote task. ''' final_environment = dict() if self._task.environment is not None: environments = self._task.environment if not isinstance(environments, list): environments = [ environments ] # the environments as inherited need to be reversed, to make # sure we merge in the parent's values first so those in the # block then task 'win' in precedence environments.reverse() for environment in environments: if environment is None: continue temp_environment = self._templar.template(environment) if not isinstance(temp_environment, dict): raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment))) # very deliberately using update here instead of combine_vars, as # these environment settings should not need to merge sub-dicts final_environment.update(temp_environment) final_environment = self._templar.template(final_environment) return self._connection._shell.env_prefix(**final_environment) def _early_needs_tmp_path(self): ''' Determines if a temp path should be created before the action is executed. ''' return getattr(self, 'TRANSFERS_FILES', False) def _late_needs_tmp_path(self, tmp, module_style): ''' Determines if a temp path is required after some early actions have already taken place. 
''' if tmp and "tmp" in tmp: # tmp has already been created return False if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su': # tmp is necessary to store the module source code # or we want to keep the files on the target system return True if module_style != "new": # even when conn has pipelining, old style modules need tmp to store arguments return True return False def _make_tmp_path(self): ''' Create and return a temporary path on a remote box. ''' basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) use_system_tmp = False if self._play_context.become and self._play_context.become_user != 'root': use_system_tmp = True tmp_mode = None if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root': tmp_mode = 0o755 cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) result = self._low_level_execute_command(cmd, sudoable=False) # error handling on this seems a little aggressive? if result['rc'] != 0: if result['rc'] == 5: output = 'Authentication failure.' elif result['rc'] == 255 and self._connection.transport in ('ssh',): if self._play_context.verbosity > 3: output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr']) else: output = (u'SSH encountered an unknown error during the connection.' ' We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue') elif u'No space left on device' in result['stderr']: output = result['stderr'] else: output = ('Authentication or permission failure.' ' In some cases, you may have been able to authenticate and did not have permissions on the remote directory.' ' Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp".' ' Failed command was: %s, exited with result %d' % (cmd, result['rc'])) if 'stdout' in result and result['stdout'] != u'': output = output + u": %s" % result['stdout'] raise AnsibleConnectionFailure(output) try: rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1] except IndexError: # stdout was empty or just space, set to / to trigger error in next if rc = '/' # Catch failure conditions, files should never be # written to locations in /. if rc == '/': raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd)) return rc def _remove_tmp_path(self, tmp_path): '''Remove a temporary path we created. ''' if tmp_path and "-tmp-" in tmp_path: cmd = self._connection._shell.remove(tmp_path, recurse=True) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. self._low_level_execute_command(cmd, sudoable=False) def _transfer_data(self, remote_path, data): ''' Copies the module data out to the temporary module path. 
''' if isinstance(data, dict): data = jsonify(data) afd, afile = tempfile.mkstemp() afo = os.fdopen(afd, 'w') try: data = to_bytes(data, errors='strict') afo.write(data) except Exception as e: #raise AnsibleError("failure encoding into utf-8: %s" % str(e)) raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e)) afo.flush() afo.close() try: self._connection.put_file(afile, remote_path) finally: os.unlink(afile) return remote_path def _remote_chmod(self, mode, path, sudoable=False): ''' Issue a remote chmod command ''' cmd = self._connection._shell.chmod(mode, path) res = self._low_level_execute_command(cmd, sudoable=sudoable) return res def _execute_remote_stat(self, path, all_vars, follow, tmp=None): ''' Get information from remote file. ''' module_args=dict( path=path, follow=follow, get_md5=False, get_checksum=True, checksum_algo='sha1', ) mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None)) if 'failed' in mystat and mystat['failed']: raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg'])) if not mystat['stat']['exists']: # empty might be matched, 1 should never match, also backwards compatible mystat['stat']['checksum'] = '1' # happens sometimes when it is a dir and not on bsd if not 'checksum' in mystat['stat']: mystat['stat']['checksum'] = '' return mystat['stat'] def _remote_checksum(self, path, all_vars): ''' Produces a remote checksum given a path, Returns a number 0-4 for specific errors instead of checksum, also ensures it is different 0 = unknown error 1 = file does not exist, this might not be an error 2 = permissions issue 3 = its a directory, not a file 4 = stat module failed, likely due to not finding python ''' x = "0" # unknown error has occured try: remote_stat = self._execute_remote_stat(path, all_vars, follow=False) if remote_stat['exists'] and remote_stat['isdir']: x = "3" # its a directory not a file else: x = remote_stat['checksum'] # if 1, file is missing except AnsibleError as e: errormsg = to_unicode(e) if errormsg.endswith('Permission denied'): x = "2" # cannot read file elif errormsg.endswith('MODULE FAILURE'): x = "4" # python not found or module uncaught exception finally: return x def _remote_expand_user(self, path): ''' takes a remote path and performs tilde expansion on the remote host ''' if not path.startswith('~'): # FIXME: Windows paths may start with "~ instead of just ~ return path # FIXME: Can't use os.path.sep for Windows paths. split_path = path.split(os.path.sep, 1) expand_path = split_path[0] if expand_path == '~': if self._play_context.become and self._play_context.become_user: expand_path = '~%s' % self._play_context.become_user cmd = self._connection._shell.expand_user(expand_path) data = self._low_level_execute_command(cmd, sudoable=False) #initial_fragment = utils.last_non_blank_line(data['stdout']) initial_fragment = data['stdout'].strip().splitlines()[-1] if not initial_fragment: # Something went wrong trying to expand the path remotely. Return # the original string return path if len(split_path) > 1: return self._connection._shell.join_path(initial_fragment, *split_path[1:]) else: return initial_fragment def _filter_leading_non_json_lines(self, data): ''' Used to avoid random output from SSH at the top of JSON output, like messages from tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). 
need to filter anything which starts not with '{', '[', ', '=' or is an empty line. filter only leading lines since multiline JSON is valid. ''' idx = 0 for line in data.splitlines(True): if line.startswith((u'{', u'[')): break idx = idx + len(line) return data[idx:] def _strip_success_message(self, data): ''' Removes the BECOME-SUCCESS message from the data. ''' if data.strip().startswith('BECOME-SUCCESS-'): data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data) return data def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True): ''' Transfer and run a module along with its arguments. ''' if task_vars is None: task_vars = dict() # if a module name was not specified for this execution, use # the action from the task if module_name is None: module_name = self._task.action if module_args is None: module_args = self._task.args # set check mode in the module arguments, if required if self._play_context.check_mode: if not self._supports_check_mode: raise AnsibleError("check mode is not supported for this operation") module_args['_ansible_check_mode'] = True else: module_args['_ansible_check_mode'] = False # set no log in the module arguments, if required module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG # set debug in the module arguments, if required module_args['_ansible_debug'] = C.DEFAULT_DEBUG # let module know we are in diff mode module_args['_ansible_diff'] = self._play_context.diff # let module know our verbosity module_args['_ansible_verbosity'] = self._display.verbosity (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars) if not shebang: raise AnsibleError("module (%s) is missing interpreter line" % module_name) # a remote tmp path may be necessary and not already created remote_module_path = None args_file_path = None if not tmp and self._late_needs_tmp_path(tmp, module_style): tmp = self._make_tmp_path() if tmp: remote_module_filename = self._connection._shell.get_remote_filename(module_name) remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename) if module_style in ['old', 'non_native_want_json']: # we'll also need a temp file to hold our module arguments args_file_path = self._connection._shell.join_path(tmp, 'args') if remote_module_path or module_style != 'new': display.debug("transferring module to remote") self._transfer_data(remote_module_path, module_data) if module_style == 'old': # we need to dump the module args to a k=v string in a file on # the remote system, which can be read and parsed by the module args_data = "" for k,v in iteritems(module_args): args_data += '%s="%s" ' % (k, pipes.quote(text_type(v))) self._transfer_data(args_file_path, args_data) elif module_style == 'non_native_want_json': self._transfer_data(args_file_path, json.dumps(module_args)) display.debug("done transferring module to remote") environment_string = self._compute_environment_string() if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root': # deal with possible umask issues once sudo'ed to other user self._remote_chmod('a+r', remote_module_path) if args_file_path is not None: self._remote_chmod('a+r', args_file_path) cmd = "" in_data = None if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new': in_data = module_data else: if remote_module_path: 
cmd = remote_module_path rm_tmp = None if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if not self._play_context.become or self._play_context.become_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp) cmd = cmd.strip() sudoable = True if module_name == "accelerate": # always run the accelerate module as the user # specified in the play, not the sudo_user sudoable = False res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data) if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if self._play_context.become and self._play_context.become_user != 'root': # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = self._connection._shell.remove(tmp, recurse=True) self._low_level_execute_command(cmd2, sudoable=False) try: data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u''))) except ValueError: # not valid json, lets try to capture error data = dict(failed=True, parsed=False) data['msg'] = "MODULE FAILURE" data['module_stdout'] = res.get('stdout', u'') if 'stderr' in res: data['module_stderr'] = res['stderr'] if res['stderr'].startswith(u'Traceback'): data['exception'] = res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there if 'stdout' in data and 'stdout_lines' not in data: data['stdout_lines'] = data.get('stdout', u'').splitlines() display.debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to run the module code or python directly when pipelining. :kwarg encoding_errors: If the value returned by the command isn't utf-8 then we have to figure out how to transform it to unicode. If the value is just going to be displayed to the user (or discarded) then the default of 'replace' is fine. If the data is used as a key or is going to be written back out to a file verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' display.debug("_low_level_execute_command(): starting") if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) display.debug("_low_level_execute_command(): no command, exiting") return dict(stdout='', stderr='') allow_same_user = C.BECOME_ALLOW_SAME_USER same_user = self._play_context.become_user == self._play_context.remote_user if sudoable and self._play_context.become and (allow_same_user or not same_user): display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) if executable is not None and self._connection.allow_executable: cmd = executable + ' -c ' + pipes.quote(cmd) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. 
# Convert either one to a text type if isinstance(stdout, binary_type): out = to_unicode(stdout, errors=encoding_errors) elif not isinstance(stdout, text_type): out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors) else: out = stdout if isinstance(stderr, binary_type): err = to_unicode(stderr, errors=encoding_errors) elif not isinstance(stderr, text_type): err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors) else: err = stderr if rc is None: rc = 0 # be sure to remove the BECOME-SUCCESS message now out = self._strip_success_message(out) display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err) def _get_first_available_file(self, faf, of=None, searchdir='files'): display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead") for fn in faf: fnt = self._templar.template(fn) if self._task._role is not None: lead = self._task._role._role_path else: lead = fnt fnd = self._loader.path_dwim_relative(lead, searchdir, fnt) if not os.path.exists(fnd) and of is not None: if self._task._role is not None: lead = self._task._role._role_path else: lead = of fnd = self._loader.path_dwim_relative(lead, searchdir, of) if os.path.exists(fnd): return fnd return None def _get_diff_data(self, destination, source, task_vars, source_file=True): diff = {} display.debug("Going to peek to see if file has changed permissions") peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True) if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0: if peek_result['state'] == 'absent': diff['before'] = '' elif peek_result['appears_binary']: diff['dst_binary'] = 1 elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF: diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF else: display.debug("Slurping the file %s" % source) dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True) if 'content' in dest_result: dest_contents = dest_result['content'] if dest_result['encoding'] == 'base64': dest_contents = base64.b64decode(dest_contents) else: raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result) diff['before_header'] = destination diff['before'] = dest_contents if source_file: st = os.stat(source) if st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF: diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF else: display.debug("Reading local copy of the file %s" % source) try: src = open(source) src_contents = src.read() except Exception as e: raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e))) if "\x00" in src_contents: diff['src_binary'] = 1 else: diff['after_header'] = source diff['after'] = src_contents else: display.debug("source of file passed in") diff['after_header'] = 'dynamically generated' diff['after'] = source if self._play_context.no_log: if 'before' in diff: diff["before"] = "" if 'after' in diff: diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]" return diff
[ "# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>\n", "#\n", "# This file is part of Ansible\n", "#\n", "# Ansible is free software: you can redistribute it and/or modify\n", "# it under the terms of the GNU General Public License as published by\n", "# the Free Software Foundation, either version 3 of the License, or\n", "# (at your option) any later version.\n", "#\n", "# Ansible is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU General Public License\n", "# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n", "\n", "# Make coding more python3-ish\n", "from __future__ import (absolute_import, division, print_function)\n", "__metaclass__ = type\n", "\n", "import base64\n", "import json\n", "import os\n", "import pipes\n", "import random\n", "import re\n", "import stat\n", "import tempfile\n", "import time\n", "from abc import ABCMeta, abstractmethod\n", "\n", "from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass\n", "\n", "from ansible import constants as C\n", "from ansible.errors import AnsibleError, AnsibleConnectionFailure\n", "from ansible.executor.module_common import modify_module\n", "from ansible.parsing.utils.jsonify import jsonify\n", "from ansible.utils.unicode import to_bytes, to_unicode\n", "\n", "try:\n", " from __main__ import display\n", "except ImportError:\n", " from ansible.utils.display import Display\n", " display = Display()\n", "\n", "\n", "class ActionBase(with_metaclass(ABCMeta, object)):\n", "\n", " '''\n", " This class is the base class for all action plugins, and defines\n", " code common to all actions. The base class handles the connection\n", " by putting/getting files and executing commands based on the current\n", " action in use.\n", " '''\n", "\n", " def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):\n", " self._task = task\n", " self._connection = connection\n", " self._play_context = play_context\n", " self._loader = loader\n", " self._templar = templar\n", " self._shared_loader_obj = shared_loader_obj\n", " # Backwards compat: self._display isn't really needed, just import the global display and use that.\n", " self._display = display\n", "\n", " self._supports_check_mode = True\n", "\n", " @abstractmethod\n", " def run(self, tmp=None, task_vars=None):\n", " \"\"\" Action Plugins should implement this method to perform their\n", " tasks. Everything else in this base class is a helper method for the\n", " action plugin to do that.\n", "\n", " :kwarg tmp: Temporary directory. Sometimes an action plugin sets up\n", " a temporary directory and then calls another module. This parameter\n", " allows us to reuse the same directory for both.\n", " :kwarg task_vars: The variables (host vars, group vars, config vars,\n", " etc) associated with this task.\n", " :returns: dictionary of results from the module\n", "\n", " Implementors of action modules may find the following variables especially useful:\n", "\n", " * Module parameters. 
These are stored in self._task.args\n", " \"\"\"\n", " # store the module invocation details into the results\n", " results = {}\n", " if self._task.async == 0:\n", " results['invocation'] = dict(\n", " module_name = self._task.action,\n", " module_args = self._task.args,\n", " )\n", " return results\n", "\n", " def _remote_file_exists(self, path):\n", " cmd = self._connection._shell.exists(path)\n", " result = self._low_level_execute_command(cmd=cmd, sudoable=True)\n", " if result['rc'] == 0:\n", " return True\n", " return False\n", "\n", " def _configure_module(self, module_name, module_args, task_vars=None):\n", " '''\n", " Handles the loading and templating of the module code through the\n", " modify_module() function.\n", " '''\n", " if task_vars is None:\n", " task_vars = dict()\n", "\n", " # Search module path(s) for named module.\n", " for mod_type in self._connection.module_implementation_preferences:\n", " # Check to determine if PowerShell modules are supported, and apply\n", " # some fixes (hacks) to module name + args.\n", " if mod_type == '.ps1':\n", " # win_stat, win_file, and win_copy are not just like their\n", " # python counterparts but they are compatible enough for our\n", " # internal usage\n", " if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:\n", " module_name = 'win_%s' % module_name\n", "\n", " # Remove extra quotes surrounding path parameters before sending to module.\n", " if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):\n", " for key in ('src', 'dest', 'path'):\n", " if key in module_args:\n", " module_args[key] = self._connection._shell._unquote(module_args[key])\n", "\n", " module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)\n", " if module_path:\n", " break\n", " else:\n", " # Use Windows version of ping module to check module paths when\n", " # using a connection that supports .ps1 suffixes. We check specifically\n", " # for win_ping here, otherwise the code would look for ping.ps1\n", " if '.ps1' in self._connection.module_implementation_preferences:\n", " ping_module = 'win_ping'\n", " else:\n", " ping_module = 'ping'\n", " module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)\n", " if module_path2 is not None:\n", " raise AnsibleError(\"The module %s was not found in configured module paths\" % (module_name))\n", " else:\n", " raise AnsibleError(\"The module %s was not found in configured module paths. \"\n", " \"Additionally, core modules are missing. 
If this is a checkout, \"\n", " \"run 'git submodule update --init --recursive' to correct this problem.\" % (module_name))\n", "\n", " # insert shared code and arguments into the module\n", " (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)\n", "\n", " return (module_style, module_shebang, module_data)\n", "\n", " def _compute_environment_string(self):\n", " '''\n", " Builds the environment string to be used when executing the remote task.\n", " '''\n", "\n", " final_environment = dict()\n", " if self._task.environment is not None:\n", " environments = self._task.environment\n", " if not isinstance(environments, list):\n", " environments = [ environments ]\n", "\n", " # the environments as inherited need to be reversed, to make\n", " # sure we merge in the parent's values first so those in the\n", " # block then task 'win' in precedence\n", " environments.reverse()\n", " for environment in environments:\n", " if environment is None:\n", " continue\n", " temp_environment = self._templar.template(environment)\n", " if not isinstance(temp_environment, dict):\n", " raise AnsibleError(\"environment must be a dictionary, received %s (%s)\" % (temp_environment, type(temp_environment)))\n", " # very deliberately using update here instead of combine_vars, as\n", " # these environment settings should not need to merge sub-dicts\n", " final_environment.update(temp_environment)\n", "\n", " final_environment = self._templar.template(final_environment)\n", " return self._connection._shell.env_prefix(**final_environment)\n", "\n", " def _early_needs_tmp_path(self):\n", " '''\n", " Determines if a temp path should be created before the action is executed.\n", " '''\n", "\n", " return getattr(self, 'TRANSFERS_FILES', False)\n", "\n", " def _late_needs_tmp_path(self, tmp, module_style):\n", " '''\n", " Determines if a temp path is required after some early actions have already taken place.\n", " '''\n", " if tmp and \"tmp\" in tmp:\n", " # tmp has already been created\n", " return False\n", " if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su':\n", " # tmp is necessary to store the module source code\n", " # or we want to keep the files on the target system\n", " return True\n", " if module_style != \"new\":\n", " # even when conn has pipelining, old style modules need tmp to store arguments\n", " return True\n", " return False\n", "\n", " def _make_tmp_path(self):\n", " '''\n", " Create and return a temporary path on a remote box.\n", " '''\n", "\n", " basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))\n", " use_system_tmp = False\n", "\n", " if self._play_context.become and self._play_context.become_user != 'root':\n", " use_system_tmp = True\n", "\n", " tmp_mode = None\n", " if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':\n", " tmp_mode = 0o755\n", "\n", " cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)\n", " result = self._low_level_execute_command(cmd, sudoable=False)\n", "\n", " # error handling on this seems a little aggressive?\n", " if result['rc'] != 0:\n", " if result['rc'] == 5:\n", " output = 'Authentication failure.'\n", " elif result['rc'] == 255 and self._connection.transport in ('ssh',):\n", "\n", " if self._play_context.verbosity > 3:\n", " output = u'SSH encountered an unknown error. 
The output was:\\n%s%s' % (result['stdout'], result['stderr'])\n", " else:\n", " output = (u'SSH encountered an unknown error during the connection.'\n", " ' We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')\n", "\n", " elif u'No space left on device' in result['stderr']:\n", " output = result['stderr']\n", " else:\n", " output = ('Authentication or permission failure.'\n", " ' In some cases, you may have been able to authenticate and did not have permissions on the remote directory.'\n", " ' Consider changing the remote temp path in ansible.cfg to a path rooted in \"/tmp\".'\n", " ' Failed command was: %s, exited with result %d' % (cmd, result['rc']))\n", " if 'stdout' in result and result['stdout'] != u'':\n", " output = output + u\": %s\" % result['stdout']\n", " raise AnsibleConnectionFailure(output)\n", "\n", " try:\n", " rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]\n", " except IndexError:\n", " # stdout was empty or just space, set to / to trigger error in next if\n", " rc = '/'\n", "\n", " # Catch failure conditions, files should never be\n", " # written to locations in /.\n", " if rc == '/':\n", " raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))\n", "\n", " return rc\n", "\n", " def _remove_tmp_path(self, tmp_path):\n", " '''Remove a temporary path we created. '''\n", "\n", " if tmp_path and \"-tmp-\" in tmp_path:\n", " cmd = self._connection._shell.remove(tmp_path, recurse=True)\n", " # If we have gotten here we have a working ssh configuration.\n", " # If ssh breaks we could leave tmp directories out on the remote system.\n", " self._low_level_execute_command(cmd, sudoable=False)\n", "\n", " def _transfer_data(self, remote_path, data):\n", " '''\n", " Copies the module data out to the temporary module path.\n", " '''\n", "\n", " if isinstance(data, dict):\n", " data = jsonify(data)\n", "\n", " afd, afile = tempfile.mkstemp()\n", " afo = os.fdopen(afd, 'w')\n", " try:\n", " data = to_bytes(data, errors='strict')\n", " afo.write(data)\n", " except Exception as e:\n", " #raise AnsibleError(\"failure encoding into utf-8: %s\" % str(e))\n", " raise AnsibleError(\"failure writing module data to temporary file for transfer: %s\" % str(e))\n", "\n", " afo.flush()\n", " afo.close()\n", "\n", " try:\n", " self._connection.put_file(afile, remote_path)\n", " finally:\n", " os.unlink(afile)\n", "\n", " return remote_path\n", "\n", " def _remote_chmod(self, mode, path, sudoable=False):\n", " '''\n", " Issue a remote chmod command\n", " '''\n", "\n", " cmd = self._connection._shell.chmod(mode, path)\n", " res = self._low_level_execute_command(cmd, sudoable=sudoable)\n", " return res\n", "\n", " def _execute_remote_stat(self, path, all_vars, follow, tmp=None):\n", " '''\n", " Get information from remote file.\n", " '''\n", " module_args=dict(\n", " path=path,\n", " follow=follow,\n", " get_md5=False,\n", " get_checksum=True,\n", " checksum_algo='sha1',\n", " )\n", " mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None))\n", "\n", " if 'failed' in mystat and mystat['failed']:\n", " raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg']))\n", "\n", " if not mystat['stat']['exists']:\n", " # empty might be matched, 1 should never match, also backwards compatible\n", " mystat['stat']['checksum'] = 
'1'\n", "\n", " # happens sometimes when it is a dir and not on bsd\n", " if not 'checksum' in mystat['stat']:\n", " mystat['stat']['checksum'] = ''\n", "\n", " return mystat['stat']\n", "\n", " def _remote_checksum(self, path, all_vars):\n", " '''\n", " Produces a remote checksum given a path,\n", " Returns a number 0-4 for specific errors instead of checksum, also ensures it is different\n", " 0 = unknown error\n", " 1 = file does not exist, this might not be an error\n", " 2 = permissions issue\n", " 3 = its a directory, not a file\n", " 4 = stat module failed, likely due to not finding python\n", " '''\n", " x = \"0\" # unknown error has occured\n", " try:\n", " remote_stat = self._execute_remote_stat(path, all_vars, follow=False)\n", " if remote_stat['exists'] and remote_stat['isdir']:\n", " x = \"3\" # its a directory not a file\n", " else:\n", " x = remote_stat['checksum'] # if 1, file is missing\n", " except AnsibleError as e:\n", " errormsg = to_unicode(e)\n", " if errormsg.endswith('Permission denied'):\n", " x = \"2\" # cannot read file\n", " elif errormsg.endswith('MODULE FAILURE'):\n", " x = \"4\" # python not found or module uncaught exception\n", " finally:\n", " return x\n", "\n", "\n", " def _remote_expand_user(self, path):\n", " ''' takes a remote path and performs tilde expansion on the remote host '''\n", " if not path.startswith('~'): # FIXME: Windows paths may start with \"~ instead of just ~\n", " return path\n", "\n", " # FIXME: Can't use os.path.sep for Windows paths.\n", " split_path = path.split(os.path.sep, 1)\n", " expand_path = split_path[0]\n", " if expand_path == '~':\n", " if self._play_context.become and self._play_context.become_user:\n", " expand_path = '~%s' % self._play_context.become_user\n", "\n", " cmd = self._connection._shell.expand_user(expand_path)\n", " data = self._low_level_execute_command(cmd, sudoable=False)\n", " #initial_fragment = utils.last_non_blank_line(data['stdout'])\n", " initial_fragment = data['stdout'].strip().splitlines()[-1]\n", "\n", " if not initial_fragment:\n", " # Something went wrong trying to expand the path remotely. 
Return\n", " # the original string\n", " return path\n", "\n", " if len(split_path) > 1:\n", " return self._connection._shell.join_path(initial_fragment, *split_path[1:])\n", " else:\n", " return initial_fragment\n", "\n", " def _filter_leading_non_json_lines(self, data):\n", " '''\n", " Used to avoid random output from SSH at the top of JSON output, like messages from\n", " tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).\n", "\n", " need to filter anything which starts not with '{', '[', ', '=' or is an empty line.\n", " filter only leading lines since multiline JSON is valid.\n", " '''\n", " idx = 0\n", " for line in data.splitlines(True):\n", " if line.startswith((u'{', u'[')):\n", " break\n", " idx = idx + len(line)\n", "\n", " return data[idx:]\n", "\n", " def _strip_success_message(self, data):\n", " '''\n", " Removes the BECOME-SUCCESS message from the data.\n", " '''\n", " if data.strip().startswith('BECOME-SUCCESS-'):\n", " data = re.sub(r'^((\\r)?\\n)?BECOME-SUCCESS.*(\\r)?\\n', '', data)\n", " return data\n", "\n", " def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):\n", " '''\n", " Transfer and run a module along with its arguments.\n", " '''\n", " if task_vars is None:\n", " task_vars = dict()\n", "\n", " # if a module name was not specified for this execution, use\n", " # the action from the task\n", " if module_name is None:\n", " module_name = self._task.action\n", " if module_args is None:\n", " module_args = self._task.args\n", "\n", " # set check mode in the module arguments, if required\n", " if self._play_context.check_mode:\n", " if not self._supports_check_mode:\n", " raise AnsibleError(\"check mode is not supported for this operation\")\n", " module_args['_ansible_check_mode'] = True\n", " else:\n", " module_args['_ansible_check_mode'] = False\n", "\n", " # set no log in the module arguments, if required\n", " module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG\n", "\n", " # set debug in the module arguments, if required\n", " module_args['_ansible_debug'] = C.DEFAULT_DEBUG\n", "\n", " # let module know we are in diff mode\n", " module_args['_ansible_diff'] = self._play_context.diff\n", "\n", " # let module know our verbosity\n", " module_args['_ansible_verbosity'] = self._display.verbosity\n", "\n", " (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)\n", " if not shebang:\n", " raise AnsibleError(\"module (%s) is missing interpreter line\" % module_name)\n", "\n", " # a remote tmp path may be necessary and not already created\n", " remote_module_path = None\n", " args_file_path = None\n", " if not tmp and self._late_needs_tmp_path(tmp, module_style):\n", " tmp = self._make_tmp_path()\n", "\n", " if tmp:\n", " remote_module_filename = self._connection._shell.get_remote_filename(module_name)\n", " remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)\n", " if module_style in ['old', 'non_native_want_json']:\n", " # we'll also need a temp file to hold our module arguments\n", " args_file_path = self._connection._shell.join_path(tmp, 'args')\n", "\n", " if remote_module_path or module_style != 'new':\n", " display.debug(\"transferring module to remote\")\n", " self._transfer_data(remote_module_path, module_data)\n", " if module_style == 'old':\n", " # we need to dump the module args to a k=v string in a 
file on\n", " # the remote system, which can be read and parsed by the module\n", " args_data = \"\"\n", " for k,v in iteritems(module_args):\n", " args_data += '%s=\"%s\" ' % (k, pipes.quote(text_type(v)))\n", " self._transfer_data(args_file_path, args_data)\n", " elif module_style == 'non_native_want_json':\n", " self._transfer_data(args_file_path, json.dumps(module_args))\n", " display.debug(\"done transferring module to remote\")\n", "\n", " environment_string = self._compute_environment_string()\n", "\n", " if tmp and \"tmp\" in tmp and self._play_context.become and self._play_context.become_user != 'root':\n", " # deal with possible umask issues once sudo'ed to other user\n", " self._remote_chmod('a+r', remote_module_path)\n", " if args_file_path is not None:\n", " self._remote_chmod('a+r', args_file_path)\n", "\n", " cmd = \"\"\n", " in_data = None\n", "\n", " if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':\n", " in_data = module_data\n", " else:\n", " if remote_module_path:\n", " cmd = remote_module_path\n", "\n", " rm_tmp = None\n", " if tmp and \"tmp\" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:\n", " if not self._play_context.become or self._play_context.become_user == 'root':\n", " # not sudoing or sudoing to root, so can cleanup files in the same step\n", " rm_tmp = tmp\n", "\n", " cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)\n", " cmd = cmd.strip()\n", "\n", " sudoable = True\n", " if module_name == \"accelerate\":\n", " # always run the accelerate module as the user\n", " # specified in the play, not the sudo_user\n", " sudoable = False\n", "\n", " res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)\n", "\n", " if tmp and \"tmp\" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:\n", " if self._play_context.become and self._play_context.become_user != 'root':\n", " # not sudoing to root, so maybe can't delete files as that other user\n", " # have to clean up temp files as original user in a second step\n", " cmd2 = self._connection._shell.remove(tmp, recurse=True)\n", " self._low_level_execute_command(cmd2, sudoable=False)\n", "\n", " try:\n", " data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))\n", " except ValueError:\n", " # not valid json, lets try to capture error\n", " data = dict(failed=True, parsed=False)\n", " data['msg'] = \"MODULE FAILURE\"\n", " data['module_stdout'] = res.get('stdout', u'')\n", " if 'stderr' in res:\n", " data['module_stderr'] = res['stderr']\n", " if res['stderr'].startswith(u'Traceback'):\n", " data['exception'] = res['stderr']\n", "\n", " # pre-split stdout into lines, if stdout is in the data and there\n", " # isn't already a stdout_lines value there\n", " if 'stdout' in data and 'stdout_lines' not in data:\n", " data['stdout_lines'] = data.get('stdout', u'').splitlines()\n", "\n", " display.debug(\"done with _execute_module (%s, %s)\" % (module_name, module_args))\n", " return data\n", "\n", " def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):\n", " '''\n", " This is the function which executes the low level shell command, which\n", " may be commands to create/remove directories for temporary files, or to\n", " run the module code or python directly when 
pipelining.\n", "\n", " :kwarg encoding_errors: If the value returned by the command isn't\n", " utf-8 then we have to figure out how to transform it to unicode.\n", " If the value is just going to be displayed to the user (or\n", " discarded) then the default of 'replace' is fine. If the data is\n", " used as a key or is going to be written back out to a file\n", " verbatim, then this won't work. May have to use some sort of\n", " replacement strategy (python3 could use surrogateescape)\n", " '''\n", "\n", " display.debug(\"_low_level_execute_command(): starting\")\n", " if not cmd:\n", " # this can happen with powershell modules when there is no analog to a Windows command (like chmod)\n", " display.debug(\"_low_level_execute_command(): no command, exiting\")\n", " return dict(stdout='', stderr='')\n", "\n", " allow_same_user = C.BECOME_ALLOW_SAME_USER\n", " same_user = self._play_context.become_user == self._play_context.remote_user\n", " if sudoable and self._play_context.become and (allow_same_user or not same_user):\n", " display.debug(\"_low_level_execute_command(): using become for this command\")\n", " cmd = self._play_context.make_become_cmd(cmd, executable=executable)\n", "\n", " if executable is not None and self._connection.allow_executable:\n", " cmd = executable + ' -c ' + pipes.quote(cmd)\n", "\n", " display.debug(\"_low_level_execute_command(): executing: %s\" % (cmd,))\n", " rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)\n", "\n", " # stdout and stderr may be either a file-like or a bytes object.\n", " # Convert either one to a text type\n", " if isinstance(stdout, binary_type):\n", " out = to_unicode(stdout, errors=encoding_errors)\n", " elif not isinstance(stdout, text_type):\n", " out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors)\n", " else:\n", " out = stdout\n", "\n", " if isinstance(stderr, binary_type):\n", " err = to_unicode(stderr, errors=encoding_errors)\n", " elif not isinstance(stderr, text_type):\n", " err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors)\n", " else:\n", " err = stderr\n", "\n", " if rc is None:\n", " rc = 0\n", "\n", " # be sure to remove the BECOME-SUCCESS message now\n", " out = self._strip_success_message(out)\n", "\n", " display.debug(\"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s\" % (rc, stdout, stderr))\n", " return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)\n", "\n", " def _get_first_available_file(self, faf, of=None, searchdir='files'):\n", "\n", " display.deprecated(\"first_available_file, use with_first_found or lookup('first_found',...) 
instead\")\n", " for fn in faf:\n", " fnt = self._templar.template(fn)\n", " if self._task._role is not None:\n", " lead = self._task._role._role_path\n", " else:\n", " lead = fnt\n", " fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)\n", "\n", " if not os.path.exists(fnd) and of is not None:\n", " if self._task._role is not None:\n", " lead = self._task._role._role_path\n", " else:\n", " lead = of\n", " fnd = self._loader.path_dwim_relative(lead, searchdir, of)\n", "\n", " if os.path.exists(fnd):\n", " return fnd\n", "\n", " return None\n", "\n", " def _get_diff_data(self, destination, source, task_vars, source_file=True):\n", "\n", " diff = {}\n", " display.debug(\"Going to peek to see if file has changed permissions\")\n", " peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)\n", "\n", " if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:\n", "\n", " if peek_result['state'] == 'absent':\n", " diff['before'] = ''\n", " elif peek_result['appears_binary']:\n", " diff['dst_binary'] = 1\n", " elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:\n", " diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF\n", " else:\n", " display.debug(\"Slurping the file %s\" % source)\n", " dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)\n", " if 'content' in dest_result:\n", " dest_contents = dest_result['content']\n", " if dest_result['encoding'] == 'base64':\n", " dest_contents = base64.b64decode(dest_contents)\n", " else:\n", " raise AnsibleError(\"unknown encoding in content option, failed: %s\" % dest_result)\n", " diff['before_header'] = destination\n", " diff['before'] = dest_contents\n", "\n", " if source_file:\n", " st = os.stat(source)\n", " if st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:\n", " diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF\n", " else:\n", " display.debug(\"Reading local copy of the file %s\" % source)\n", " try:\n", " src = open(source)\n", " src_contents = src.read()\n", " except Exception as e:\n", " raise AnsibleError(\"Unexpected error while reading source (%s) for diff: %s \" % (source, str(e)))\n", " if \"\\x00\" in src_contents:\n", " diff['src_binary'] = 1\n", " else:\n", " diff['after_header'] = source\n", " diff['after'] = src_contents\n", " else:\n", " display.debug(\"source of file passed in\")\n", " diff['after_header'] = 'dynamically generated'\n", " diff['after'] = source\n", "\n", " if self._play_context.no_log:\n", " if 'before' in diff:\n", " diff[\"before\"] = \"\"\n", " if 'after' in diff:\n", " diff[\"after\"] = \" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\"\n", "\n", " return diff\n" ]
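The action-plugin lines above include _filter_leading_non_json_lines, which drops SSH banner/MOTD noise printed before a module's JSON reply. As a minimal standalone sketch of that same idea (plain Python, not the Ansible class; the function name here is only illustrative):

import json

def filter_leading_non_json_lines(data: str) -> str:
    # Skip every line until the first one that starts a JSON object or array;
    # keepends=True so the accumulated offset stays aligned with the original text.
    idx = 0
    for line in data.splitlines(True):
        if line.startswith(('{', '[')):
            break
        idx += len(line)
    return data[idx:]

if __name__ == '__main__':
    raw = 'Warning: banner text\nMOTD noise\n{"changed": false, "rc": 0}\n'
    print(json.loads(filter_leading_non_json_lines(raw)))  # {'changed': False, 'rc': 0}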
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0.02564102564102564, 0.022222222222222223, 0.02127659574468085, 0.024390243902439025, 0.023809523809523808, 0, 0.009259259259259259, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0.045454545454545456, 0.029411764705882353, 0, 0.04081632653061224, 0.0425531914893617, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0.010869565217391304, 0.00684931506849315, 0, 0, 0.01020408163265306, 0, 0.010101010101010102, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0.007042253521126761, 0, 0.009174311926605505, 0, 0.010638297872340425, 0.009900990099009901, 0.008, 0, 0, 0.008695652173913044, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007246376811594203, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0.00625, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.012048192771084338, 0, 0, 0, 0.007874015748031496, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.007874015748031496, 0, 0.011235955056179775, 0.013605442176870748, 0, 0, 0, 0, 0, 0.014814814814814815, 0.01834862385321101, 0.020833333333333332, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0.007518796992481203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013157894736842105, 0.009433962264150943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0.006896551724137931, 0, 0, 0.009259259259259259, 0, 0, 0.011627906976744186, 0, 0, 0, 0.022222222222222223, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0.022727272727272728, 0, 0.012195121951219513, 0, 0.018867924528301886, 0, 0.014705882352941176, 0, 0, 0, 0.023255813953488372, 0, 0.013888888888888888, 0, 0, 0, 0, 0.024390243902439025, 0.011904761904761904, 0.020833333333333332, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014285714285714285, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0.01098901098901099, 0.011111111111111112, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007246376811594203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0070921985815602835, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0, 0, 0, 0.009259259259259259, 0, 0, 0, 0, 0, 0, 0, 0, 0.007142857142857143, 0, 0, 0, 0, 0, 0, 0.008849557522123894, 0.011111111111111112, 0.011363636363636364, 0, 0, 0.007518796992481203, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0.008849557522123894, 0.011494252873563218, 0.011627906976744186, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.007352941176470588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.008928571428571428, 0, 0, 0, 0, 0.011764705882352941, 0.011111111111111112, 0.011235955056179775, 0.012345679012345678, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 
0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0.012195121951219513, 0, 0, 0, 0.00909090909090909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00641025641025641, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.006711409395973154, 0, 0, 0, 0, 0, 0.009345794392523364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00819672131147541, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00819672131147541, 0, 0 ]
665
0.002248
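The same row documents a sentinel convention in _remote_checksum: the helper returns either a real SHA-1 digest or a one-character error code ('0' unknown error, '1' missing file, '2' permission denied, '3' directory, '4' stat/python failure). A hedged, standalone illustration of consuming such a value follows; interpret_checksum is an invented name, not Ansible API:

# Mirrors the 0-4 sentinel codes described in the _remote_checksum docstring above.
SENTINELS = {
    '0': 'unknown error',
    '1': 'file does not exist (not necessarily an error)',
    '2': 'permission denied while reading the file',
    '3': 'path is a directory, not a file',
    '4': 'stat module failed (python missing or module exception)',
}

def interpret_checksum(value: str) -> str:
    # Anything other than a single sentinel character is treated as a SHA-1 hex digest.
    if value in SENTINELS:
        return SENTINELS[value]
    return 'sha1 checksum: ' + value

if __name__ == '__main__':
    print(interpret_checksum('3'))
    print(interpret_checksum('356a192b7913b04c54574d18c28d46e6395428ab'))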
#!/usr/bin/env python # encoding: utf-8 """ untitled.py Created by Nikolaus Sonnenschein on 2009-07-03. Copyright (c) 2009 Jacobs University of Bremen. All rights reserved. """ import os from tables import * from ifba.GlpkWrap.fluxdist import FBAsimulationResult def h5Container(fileName, lp, title="", complevel=5, expectedrows=1000000): """Constructs a h5 file container that is suitable for the storage of FBAsimulationResult objects.""" if os.path.exists(fileName): print 'file already exists - will not construct new one' return fileName h5 = openFile(fileName, 'w') numReacs = lp.getNumCols() class FBAsimulations(IsDescription): fluxactivity = Float32Col(shape=(numReacs), pos=0) knockoutEffects = Float32Col(shape=(numReacs), pos=2) lowerBounds = Float32Col(shape=(numReacs), pos=3) upperBounds = Float32Col(shape=(numReacs), pos=4) objective = Float32Col(shape=(numReacs), pos=5) timeStamp = Float32Col(pos=6) descr = StringCol(500, pos=7) h5.createArray(where=h5.root, name='rxnMapping', object=lp.getColumnIDs(), title='Index - ReactionID mapping') filters = Filters(complevel=5, complib='zlib', fletcher32=True, shuffle=True) h5.createTable(where=h5.root, description=FBAsimulations, name='simulations', title='Table containing the activities and media conditions for FBA simulations.', filters=filters, expectedrows=expectedrows) h5.flush() print h5 h5.close() return fileName class SimulationDB(object): """docstring for SimulationDB""" def __init__(self, h5file): self.h5container = openFile(h5file, 'r+') def writeSimulationResult(self, simulationResult): "Takes a SimulationResult object and store it to the database." table = self.h5container.root.simulations row = table.row colNames = table.colnames for col in colNames: try: attr = getattr(simulationResult, col) except NameError: print "Missing of a mandatory column field in the provided simulation result" sys.exit(-1) row[col] = attr row.append() self.h5container.flush() def retrieveSimulationResultByID(self): """docstring for re""" # raise NotImplementedError, "Do it looser!" table = self.h5container.root.simulations print table[1] def close(self): """Close the database.""" self.h5container.flush() self.h5container.close() print "bye bye" if __name__ == '__main__': pass
[ "#!/usr/bin/env python\n", "# encoding: utf-8\n", "\"\"\"\n", "untitled.py\n", "\n", "Created by Nikolaus Sonnenschein on 2009-07-03.\n", "Copyright (c) 2009 Jacobs University of Bremen. All rights reserved.\n", "\"\"\"\n", "\n", "import os\n", "from tables import *\n", "from ifba.GlpkWrap.fluxdist import FBAsimulationResult\n", "\n", "\n", "def h5Container(fileName, lp, title=\"\", complevel=5, expectedrows=1000000):\n", " \"\"\"Constructs a h5 file container that is suitable for the storage of FBAsimulationResult objects.\"\"\"\n", " if os.path.exists(fileName):\n", " print 'file already exists - will not construct new one'\n", " return fileName\n", " h5 = openFile(fileName, 'w')\n", " numReacs = lp.getNumCols()\n", " class FBAsimulations(IsDescription):\n", " fluxactivity = Float32Col(shape=(numReacs), pos=0)\n", " knockoutEffects = Float32Col(shape=(numReacs), pos=2)\n", " lowerBounds = Float32Col(shape=(numReacs), pos=3)\n", " upperBounds = Float32Col(shape=(numReacs), pos=4)\n", " objective = Float32Col(shape=(numReacs), pos=5)\n", " timeStamp = Float32Col(pos=6)\n", " descr = StringCol(500, pos=7)\n", " h5.createArray(where=h5.root, name='rxnMapping', object=lp.getColumnIDs(), title='Index - ReactionID mapping')\n", " filters = Filters(complevel=5, complib='zlib', fletcher32=True, shuffle=True)\n", " h5.createTable(where=h5.root, description=FBAsimulations, name='simulations', title='Table containing the activities and media conditions for FBA simulations.', filters=filters, expectedrows=expectedrows)\n", " h5.flush()\n", " print h5\n", " h5.close()\n", " return fileName\n", " \n", "class SimulationDB(object):\n", " \"\"\"docstring for SimulationDB\"\"\"\n", " def __init__(self, h5file):\n", " self.h5container = openFile(h5file, 'r+')\n", " \n", " def writeSimulationResult(self, simulationResult):\n", " \"Takes a SimulationResult object and store it to the database.\"\n", " table = self.h5container.root.simulations\n", " row = table.row\n", " colNames = table.colnames\n", " for col in colNames:\n", " try:\n", " attr = getattr(simulationResult, col)\n", " except NameError:\n", " print \"Missing of a mandatory column field in the provided simulation result\"\n", " sys.exit(-1)\n", " row[col] = attr\n", " row.append()\n", " self.h5container.flush()\n", " \n", " def retrieveSimulationResultByID(self):\n", " \"\"\"docstring for re\"\"\"\n", " # raise NotImplementedError, \"Do it looser!\"\n", " table = self.h5container.root.simulations\n", " print table[1]\n", " \n", " def close(self):\n", " \"\"\"Close the database.\"\"\"\n", " self.h5container.flush()\n", " self.h5container.close()\n", " print \"bye bye\"\n", " \n", "\n", "if __name__ == '__main__':\n", " pass" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009433962264150943, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0.008695652173913044, 0.012195121951219513, 0.004784688995215311, 0, 0, 0, 0, 0.2, 0.03571428571428571, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0.125 ]
72
0.013391
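The PyTables snippet above uses the legacy Python 2 API (openFile/createTable, print statements). Below is a minimal sketch of the same container pattern against the current PyTables 3.x API (open_file/create_table); the column names and sizes are illustrative placeholders, not taken from the original model:

import tables

N_REACTIONS = 4  # illustrative; the original derives this from the LP column count

class Simulation(tables.IsDescription):
    fluxactivity = tables.Float32Col(shape=(N_REACTIONS,), pos=0)
    objective = tables.Float32Col(pos=1)
    descr = tables.StringCol(200, pos=2)

def make_container(path):
    # zlib-compressed, checksummed table, as in the original Filters() call.
    filters = tables.Filters(complevel=5, complib='zlib',
                             shuffle=True, fletcher32=True)
    with tables.open_file(path, mode='w') as h5:
        h5.create_table(h5.root, 'simulations', Simulation,
                        title='FBA simulation results',
                        filters=filters, expectedrows=1_000_000)

def append_result(path, flux, objective, descr):
    with tables.open_file(path, mode='r+') as h5:
        row = h5.root.simulations.row
        row['fluxactivity'] = flux
        row['objective'] = objective
        row['descr'] = descr          # StringCol stores fixed-width bytes
        row.append()
        h5.root.simulations.flush()

if __name__ == '__main__':
    make_container('fba.h5')
    append_result('fba.h5', [0.1, 0.2, 0.0, 1.3], 0.87, b'test run')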
#!/usr/bin/env python3 from setuptools import setup, find_packages extra_files = [ "templates/compiler.py.inc", "templates/run_static.py.inc", "templates/run_dynamic.py.inc", "templates/slurm-prepare-node.sh.inc", "templates/slurm-cleanup-node.sh.inc" ] sql_extra_files = [ "func.compare_region_wise2.sql", "func.experiments.sql", "func.recompilation.sql", "func.run_regions.sql", "func.total_dyncov_clean.sql", "func.total_speedup.sql", "func.compare_region_wise.sql", "func.project_region_time.sql", "func.run_durations.sql", "func.speedup.sql", "func.total_dyncov.sql", "func.pj-test-eval.sql" ] setup( name='benchbuild', version='1.3.2', url='https://github.com/PolyJIT/benchbuild', packages=find_packages(exclude=["docs", "extern", "filters", "linker", "src", "statistics", "tests", "results"]), package_data={"benchbuild.utils": extra_files, "benchbuild": sql_extra_files}, include_package_data=True, install_requires=[ "lazy==1.2", "SQLAlchemy==1.0.4", "dill==0.2.4", "plumbum>=1.5.0", "regex==2015.5.28", "wheel==0.24.0", "parse==1.6.6", "virtualenv==13.1.0", "sphinxcontrib-napoleon", "psycopg2", "sqlalchemy-migrate", "six>=1.7.0", "psutil>=4.0.0", "pylint>=1.5.5", "seaborn>=0.7.1", "pandas>=0.19.2", "matplotlib==1.5.3" ], author="Andreas Simbuerger", author_email="simbuerg@fim.uni-passau.de", description="This is the experiment driver for the benchbuild study", license="MIT", entry_points={ 'console_scripts': ['benchbuild=benchbuild.driver:main', 'container=benchbuild.container:main'] }, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Software Development :: Testing', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3' ], keywords="benchbuild experiments run-time", )
[ "#!/usr/bin/env python3\n", "from setuptools import setup, find_packages\n", "\n", "extra_files = [\n", " \"templates/compiler.py.inc\",\n", " \"templates/run_static.py.inc\",\n", " \"templates/run_dynamic.py.inc\",\n", " \"templates/slurm-prepare-node.sh.inc\",\n", " \"templates/slurm-cleanup-node.sh.inc\"\n", "]\n", "\n", "sql_extra_files = [\n", " \"func.compare_region_wise2.sql\",\n", " \"func.experiments.sql\",\n", " \"func.recompilation.sql\",\n", " \"func.run_regions.sql\",\n", " \"func.total_dyncov_clean.sql\",\n", " \"func.total_speedup.sql\",\n", " \"func.compare_region_wise.sql\",\n", " \"func.project_region_time.sql\",\n", " \"func.run_durations.sql\",\n", " \"func.speedup.sql\",\n", " \"func.total_dyncov.sql\",\n", " \"func.pj-test-eval.sql\"\n", "]\n", "\n", "setup(\n", " name='benchbuild',\n", " version='1.3.2',\n", " url='https://github.com/PolyJIT/benchbuild',\n", " packages=find_packages(exclude=[\"docs\", \"extern\", \"filters\", \"linker\",\n", " \"src\", \"statistics\", \"tests\", \"results\"]),\n", " package_data={\"benchbuild.utils\": extra_files,\n", " \"benchbuild\": sql_extra_files},\n", " include_package_data=True,\n", " install_requires=[\n", " \"lazy==1.2\", \"SQLAlchemy==1.0.4\", \"dill==0.2.4\", \"plumbum>=1.5.0\",\n", " \"regex==2015.5.28\", \"wheel==0.24.0\", \"parse==1.6.6\",\n", " \"virtualenv==13.1.0\", \"sphinxcontrib-napoleon\", \"psycopg2\",\n", " \"sqlalchemy-migrate\", \"six>=1.7.0\", \"psutil>=4.0.0\", \"pylint>=1.5.5\",\n", " \"seaborn>=0.7.1\", \"pandas>=0.19.2\", \"matplotlib==1.5.3\"\n", " ],\n", " author=\"Andreas Simbuerger\",\n", " author_email=\"simbuerg@fim.uni-passau.de\",\n", " description=\"This is the experiment driver for the benchbuild study\",\n", " license=\"MIT\",\n", " entry_points={\n", " 'console_scripts': ['benchbuild=benchbuild.driver:main',\n", " 'container=benchbuild.container:main']\n", " },\n", " classifiers=[\n", " 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n", " 'Topic :: Software Development :: Testing',\n", " 'License :: OSI Approved :: MIT License',\n", " 'Programming Language :: Python :: 3'\n", " ],\n", " keywords=\"benchbuild experiments run-time\", )\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
57
0
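The setup() call above registers two console scripts via entry_points ('benchbuild=benchbuild.driver:main', 'container=benchbuild.container:main'). As a hedged illustration of that packaging contract only, the referenced module merely has to expose a main() callable whose return value becomes the exit status; the stub below is hypothetical and is not the real benchbuild driver:

import argparse
import sys

def main(argv=None):
    # setuptools generates a wrapper script that imports this module, calls
    # main(), and passes the return value to sys.exit().
    parser = argparse.ArgumentParser(prog='benchbuild')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='print extra progress information')
    args = parser.parse_args(argv)
    if args.verbose:
        print('benchbuild stub driver invoked', file=sys.stderr)
    return 0

if __name__ == '__main__':
    sys.exit(main())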
#!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the SegWit changeover logic.""" from test_framework.test_framework import TrollcoinTestFramework from test_framework.util import * from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex from test_framework.address import script_to_p2sh, key_to_p2pkh from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE from io import BytesIO NODE_0 = 0 NODE_1 = 1 NODE_2 = 2 WIT_V0 = 0 WIT_V1 = 1 # Create a scriptPubKey corresponding to either a P2WPKH output for the # given pubkey, or a P2WSH output of a 1-of-1 multisig for the given # pubkey. Returns the hex encoding of the scriptPubKey. def witness_script(use_p2wsh, pubkey): if (use_p2wsh == False): # P2WPKH instead pubkeyhash = hash160(hex_str_to_bytes(pubkey)) pkscript = CScript([OP_0, pubkeyhash]) else: # 1-of-1 multisig witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]) scripthash = sha256(witness_program) pkscript = CScript([OP_0, scripthash]) return bytes_to_hex_str(pkscript) # Return a transaction (in hex) that spends the given utxo to a segwit output, # optionally wrapping the segwit output using P2SH. def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount): pkscript = hex_str_to_bytes(witness_script(use_p2wsh, pubkey)) if (encode_p2sh): p2sh_hash = hash160(pkscript) pkscript = CScript([OP_HASH160, p2sh_hash, OP_EQUAL]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), b"")) tx.vout.append(CTxOut(int(amount*COIN), pkscript)) return ToHex(tx) # Create a transaction spending a given utxo to a segwit output corresponding # to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH; # encode_p2sh determines whether to wrap in P2SH. # sign=True will have the given node sign the transaction. # insert_redeem_script will be added to the scriptSig, if given. 
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""): tx_to_witness = create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount) if (sign): signed = node.signrawtransaction(tx_to_witness) assert("errors" not in signed or len(["errors"]) == 0) return node.sendrawtransaction(signed["hex"]) else: if (insert_redeem_script): tx = FromHex(CTransaction(), tx_to_witness) tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)]) tx_to_witness = ToHex(tx) return node.sendrawtransaction(tx_to_witness) def getutxo(txid): utxo = {} utxo["vout"] = 0 utxo["txid"] = txid return utxo def find_unspent(node, min_value): for utxo in node.listunspent(): if utxo['amount'] >= min_value: return utxo class SegWitTest(TrollcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-walletprematurewitness", "-rpcserialversion=0"])) self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"])) self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"])) connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 1) connect_nodes(self.nodes[0], 2) self.is_network_split = False self.sync_all() def success_mine(self, node, txid, sign, redeem_script=""): send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) block = node.generate(1) assert_equal(len(node.getblock(block[0])["tx"]), 2) sync_blocks(self.nodes) def skip_mine(self, node, txid, sign, redeem_script=""): send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) block = node.generate(1) assert_equal(len(node.getblock(block[0])["tx"]), 1) sync_blocks(self.nodes) def fail_accept(self, node, error_msg, txid, sign, redeem_script=""): assert_raises_jsonrpc(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) def fail_mine(self, node, txid, sign, redeem_script=""): send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script) assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1) sync_blocks(self.nodes) def run_test(self): self.nodes[0].generate(161) #block 161 self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork") txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) tmpl = self.nodes[0].getblocktemplate({}) assert(tmpl['sizelimit'] == 1000000) assert('weightlimit' not in tmpl) assert(tmpl['sigoplimit'] == 20000) assert(tmpl['transactions'][0]['hash'] == txid) assert(tmpl['transactions'][0]['sigops'] == 2) tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) assert(tmpl['sizelimit'] == 1000000) assert('weightlimit' not in tmpl) assert(tmpl['sigoplimit'] == 20000) assert(tmpl['transactions'][0]['hash'] == txid) assert(tmpl['transactions'][0]['sigops'] == 2) self.nodes[0].generate(1) #block 162 balance_presetup = self.nodes[0].getbalance() self.pubkey = [] p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh wit_ids = [] # wit_ids[NODE][VER] is an array of txids 
that spend to a witness version VER pkscript to an address for NODE via bare witness for i in range(3): newaddress = self.nodes[i].getnewaddress() self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"]) multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]]) self.nodes[i].addwitnessaddress(newaddress) self.nodes[i].addwitnessaddress(multiaddress) p2sh_ids.append([]) wit_ids.append([]) for v in range(2): p2sh_ids[i].append([]) wit_ids[i].append([]) for i in range(5): for n in range(3): for v in range(2): wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999"))) self.nodes[0].generate(1) #block 163 sync_blocks(self.nodes) # Make sure all nodes recognize the transactions as theirs assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50) assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999")) assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999")) self.nodes[0].generate(260) #block 423 sync_blocks(self.nodes) self.log.info("Verify default node can't accept any witness format txs before fork") # unsigned, no scriptsig self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False) self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False) self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False) self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False) # unsigned with redeem script self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0])) self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0])) # signed self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True) self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True) self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True) self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True) self.log.info("Verify witness txs are skipped for mining before the fork") self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424 self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425 self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426 self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427 # TODO: An old node would see these txs without witnesses and be able to mine them self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork") self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428 self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429 self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False) self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False) self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork") self.success_mine(self.nodes[2], 
p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430 self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431 self.log.info("Verify previous witness txs skipped for mining can now be mined") assert_equal(len(self.nodes[2].getrawmempool()), 4) block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3) sync_blocks(self.nodes) assert_equal(len(self.nodes[2].getrawmempool()), 0) segwit_tx_list = self.nodes[2].getblock(block[0])["tx"] assert_equal(len(segwit_tx_list), 5) self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag") assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False)) assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False)) for i in range(len(segwit_tx_list)): tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i])) assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i])) assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"]) assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness())) self.log.info("Verify witness txs without witness data are invalid after the fork") self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False) self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False) self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2])) self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2])) self.log.info("Verify default node can now use witness txs") self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432 self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433 self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434 self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435 self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork") txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1) tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']}) assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data assert(tmpl['weightlimit'] == 4000000) assert(tmpl['sigoplimit'] == 80000) assert(tmpl['transactions'][0]['txid'] == txid) assert(tmpl['transactions'][0]['sigops'] == 8) self.nodes[0].generate(1) # Mine a block to clear the gbt cache self.log.info("Non-segwit miners are able to use GBT response after activation.") # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) -> # tx2 (segwit input, paying to a non-segwit output) -> # tx3 (non-segwit input, paying to a non-segwit output). # tx1 is allowed to appear in the block, but no others. 
txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996")) hex_tx = self.nodes[0].gettransaction(txid)['hex'] tx = FromHex(CTransaction(), hex_tx) assert(tx.wit.is_null()) # This should not be a segwit input assert(txid1 in self.nodes[0].getrawmempool()) # Now create tx2, which will spend from txid1. tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE]))) tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex'] txid2 = self.nodes[0].sendrawtransaction(tx2_hex) tx = FromHex(CTransaction(), tx2_hex) assert(not tx.wit.is_null()) # Now create tx3, which will spend from txid2 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee tx.calc_sha256() txid3 = self.nodes[0].sendrawtransaction(ToHex(tx)) assert(tx.wit.is_null()) assert(txid3 in self.nodes[0].getrawmempool()) # Now try calling getblocktemplate() without segwit support. template = self.nodes[0].getblocktemplate() # Check that tx1 is the only transaction of the 3 in the template. template_txids = [ t['txid'] for t in template['transactions'] ] assert(txid2 not in template_txids and txid3 not in template_txids) assert(txid1 in template_txids) # Check that running with segwit support results in all 3 being included. template = self.nodes[0].getblocktemplate({"rules": ["segwit"]}) template_txids = [ t['txid'] for t in template['transactions'] ] assert(txid1 in template_txids) assert(txid2 in template_txids) assert(txid3 in template_txids) # Mine a block to clear the gbt cache again. self.nodes[0].generate(1) self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent") # Some public keys to be used later pubkeys = [ "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97 "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66 "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ ] # Import a compressed key and an uncompressed key, generate some multisig addresses self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn") uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) self.nodes[0].importpubkey(pubkeys[0]) compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] 
self.nodes[0].importpubkey(pubkeys[1]) compressed_solvable_address.append(key_to_p2pkh(pubkeys[1])) self.nodes[0].importpubkey(pubkeys[2]) uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])] spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress spendable_after_importaddress = [] # These outputs should be seen after importaddress solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress solvable_anytime = [] # These outputs should be solvable after importpubkey unseen_anytime = [] # These outputs should never be seen uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])) uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])) compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])) uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])) compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])) compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])) unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"] # Test multisig_without_privkey # We have 2 public keys without private keys, use addmultisigaddress to add to wallet. # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address. 
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]]) script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL])) for i in compressed_spendable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # bare and p2sh multisig with compressed keys should always be spendable spendable_anytime.extend([bare, p2sh]) # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with compressed keys should always be spendable spendable_anytime.extend([p2pkh, p2pk]) # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) for i in uncompressed_spendable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # bare and p2sh multisig with uncompressed keys should always be spendable spendable_anytime.extend([bare, p2sh]) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with uncompressed keys should always be spendable spendable_anytime.extend([p2pkh, p2pk]) # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) # witness with uncompressed keys are never seen unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) for i in compressed_solvable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): # Multisig without private is not seen after addmultisigaddress, but seen after importaddress [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with compressed keys should always be seen solvable_anytime.extend([p2pkh, p2pk]) # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) for i in uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress solvable_after_importaddress.extend([bare, p2sh]) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, 
p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with uncompressed keys should always be seen solvable_anytime.extend([p2pkh, p2pk]) # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) # witness with uncompressed keys are never seen unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) op1 = CScript([OP_1]) op0 = CScript([OP_0]) # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)] unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG]) unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)]) p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL]) p2wshop1 = CScript([OP_0, sha256(op1)]) unsolvable_after_importaddress.append(unsolvablep2pkh) unsolvable_after_importaddress.append(unsolvablep2wshp2pkh) unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script unsolvable_after_importaddress.append(p2wshop1) unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided unsolvable_after_importaddress.append(p2shop0) spendable_txid = [] solvable_txid = [] spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2)) solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1)) self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0) importlist = [] for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): bare = hex_str_to_bytes(v['hex']) importlist.append(bytes_to_hex_str(bare)) importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) else: pubkey = hex_str_to_bytes(v['pubkey']) p2pk = CScript([pubkey, OP_CHECKSIG]) p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) importlist.append(bytes_to_hex_str(p2pk)) importlist.append(bytes_to_hex_str(p2pkh)) importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)]))) importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)]))) importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)]))) importlist.append(bytes_to_hex_str(unsolvablep2pkh)) importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh)) importlist.append(bytes_to_hex_str(op1)) importlist.append(bytes_to_hex_str(p2wshop1)) for i in importlist: # import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC # exceptions and continue. 
try: self.nodes[0].importaddress(i,"",False,True) except JSONRPCException as exp: assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script") assert_equal(exp.error["code"], -4) self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) self.mine_and_test_listunspent(unseen_anytime, 0) # addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is # not in the wallet # note that no witness address should be returned by unsolvable addresses # the multisig_without_privkey_address will fail because its keys were not added with importpubkey for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]: assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) for i in compressed_spendable_address + compressed_solvable_address: witaddress = self.nodes[0].addwitnessaddress(i) # addwitnessaddress should return the same address if it is a known P2SH-witness address assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2)) solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1)) self.mine_and_test_listunspent(unsolvable_after_importaddress, 1) self.mine_and_test_listunspent(unseen_anytime, 0) # Repeat some tests. 
This time we don't add witness scripts with importaddress # Import a compressed key and an uncompressed key, generate some multisig addresses self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH") uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"] self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw") compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"] self.nodes[0].importpubkey(pubkeys[5]) compressed_solvable_address = [key_to_p2pkh(pubkeys[5])] self.nodes[0].importpubkey(pubkeys[6]) uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])] spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable unseen_anytime = [] # These outputs should never be seen uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])) uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])) compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])) uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])) compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])) premature_witaddress = [] for i in compressed_spendable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) premature_witaddress.append(script_to_p2sh(p2wsh)) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh]) premature_witaddress.append(script_to_p2sh(p2wpkh)) for i in uncompressed_spendable_address + uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen unseen_anytime.extend([p2wpkh, p2sh_p2wpkh]) for i in compressed_solvable_address: v = self.nodes[0].validateaddress(i) if (v['isscript']): # P2WSH multisig without private key are seen after addwitnessaddress [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) premature_witaddress.append(script_to_p2sh(p2wsh)) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after addwitnessaddress solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh]) 
premature_witaddress.append(script_to_p2sh(p2wpkh)) self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0) # addwitnessaddress should refuse to return a witness address if an uncompressed key is used # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress # premature_witaddress are not accepted until the script is added with addwitnessaddress first for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]: # This will raise an exception assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i) # after importaddress it should pass addwitnessaddress v = self.nodes[0].validateaddress(compressed_solvable_address[1]) self.nodes[0].importaddress(v['hex'],"",False,True) for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress: witaddress = self.nodes[0].addwitnessaddress(i) assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress)) spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2)) solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1)) self.mine_and_test_listunspent(unseen_anytime, 0) # Check that spendable outputs are really spendable self.create_and_mine_tx_from_txids(spendable_txid) # import all the private keys so solvable addresses become spendable self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb") self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97") self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV") self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd") self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66") self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K") self.create_and_mine_tx_from_txids(solvable_txid) def mine_and_test_listunspent(self, script_list, ismine): utxo = find_unspent(self.nodes[0], 50) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout']))) for i in script_list: tx.vout.append(CTxOut(10000000, i)) tx.rehash() signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] txid = self.nodes[0].sendrawtransaction(signresults, True) self.nodes[0].generate(1) sync_blocks(self.nodes) watchcount = 0 spendcount = 0 for i in self.nodes[0].listunspent(): if (i['txid'] == txid): watchcount += 1 if (i['spendable'] == True): spendcount += 1 if (ismine == 2): assert_equal(spendcount, len(script_list)) elif (ismine == 1): assert_equal(watchcount, len(script_list)) assert_equal(spendcount, 0) else: assert_equal(watchcount, 0) return txid def p2sh_address_to_script(self,v): bare = CScript(hex_str_to_bytes(v['hex'])) p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) p2wsh = CScript([OP_0, sha256(bare)]) p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) return([bare, p2sh, p2wsh, p2sh_p2wsh]) def p2pkh_address_to_script(self,v): pubkey = hex_str_to_bytes(v['pubkey']) p2wpkh = CScript([OP_0, hash160(pubkey)]) p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) p2pk = CScript([pubkey, OP_CHECKSIG]) p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL]) p2sh_p2pkh = 
CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL]) p2wsh_p2pk = CScript([OP_0, sha256(p2pk)]) p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)]) p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL]) p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] def create_and_mine_tx_from_txids(self, txids, success = True): tx = CTransaction() for i in txids: txtmp = CTransaction() txraw = self.nodes[0].getrawtransaction(i) f = BytesIO(hex_str_to_bytes(txraw)) txtmp.deserialize(f) for j in range(len(txtmp.vout)): tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j))) tx.vout.append(CTxOut(0, CScript())) tx.rehash() signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex'] self.nodes[0].sendrawtransaction(signresults, True) self.nodes[0].generate(1) sync_blocks(self.nodes) if __name__ == '__main__': SegWitTest().main()
[ "#!/usr/bin/env python3\n", "# Copyright (c) 2016 The Bitcoin Core developers\n", "# Distributed under the MIT software license, see the accompanying\n", "# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n", "\"\"\"Test the SegWit changeover logic.\"\"\"\n", "\n", "from test_framework.test_framework import TrollcoinTestFramework\n", "from test_framework.util import *\n", "from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex\n", "from test_framework.address import script_to_p2sh, key_to_p2pkh\n", "from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE\n", "from io import BytesIO\n", "\n", "NODE_0 = 0\n", "NODE_1 = 1\n", "NODE_2 = 2\n", "WIT_V0 = 0\n", "WIT_V1 = 1\n", "\n", "# Create a scriptPubKey corresponding to either a P2WPKH output for the\n", "# given pubkey, or a P2WSH output of a 1-of-1 multisig for the given\n", "# pubkey. Returns the hex encoding of the scriptPubKey.\n", "def witness_script(use_p2wsh, pubkey):\n", " if (use_p2wsh == False):\n", " # P2WPKH instead\n", " pubkeyhash = hash160(hex_str_to_bytes(pubkey))\n", " pkscript = CScript([OP_0, pubkeyhash])\n", " else:\n", " # 1-of-1 multisig\n", " witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])\n", " scripthash = sha256(witness_program)\n", " pkscript = CScript([OP_0, scripthash])\n", " return bytes_to_hex_str(pkscript)\n", "\n", "# Return a transaction (in hex) that spends the given utxo to a segwit output,\n", "# optionally wrapping the segwit output using P2SH.\n", "def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount):\n", " pkscript = hex_str_to_bytes(witness_script(use_p2wsh, pubkey))\n", " if (encode_p2sh):\n", " p2sh_hash = hash160(pkscript)\n", " pkscript = CScript([OP_HASH160, p2sh_hash, OP_EQUAL])\n", " tx = CTransaction()\n", " tx.vin.append(CTxIn(COutPoint(int(utxo[\"txid\"], 16), utxo[\"vout\"]), b\"\"))\n", " tx.vout.append(CTxOut(int(amount*COIN), pkscript))\n", " return ToHex(tx)\n", "\n", "# Create a transaction spending a given utxo to a segwit output corresponding\n", "# to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH;\n", "# encode_p2sh determines whether to wrap in P2SH.\n", "# sign=True will have the given node sign the transaction.\n", "# insert_redeem_script will be added to the scriptSig, if given.\n", "def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=\"\"):\n", " tx_to_witness = create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount)\n", " if (sign):\n", " signed = node.signrawtransaction(tx_to_witness)\n", " assert(\"errors\" not in signed or len([\"errors\"]) == 0)\n", " return node.sendrawtransaction(signed[\"hex\"])\n", " else:\n", " if (insert_redeem_script):\n", " tx = FromHex(CTransaction(), tx_to_witness)\n", " tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])\n", " tx_to_witness = ToHex(tx)\n", "\n", " return node.sendrawtransaction(tx_to_witness)\n", "\n", "def getutxo(txid):\n", " utxo = {}\n", " utxo[\"vout\"] = 0\n", " utxo[\"txid\"] = txid\n", " return utxo\n", "\n", "def find_unspent(node, min_value):\n", " for utxo in node.listunspent():\n", " if utxo['amount'] >= min_value:\n", " return utxo\n", "\n", "class SegWitTest(TrollcoinTestFramework):\n", "\n", " def __init__(self):\n", " super().__init__()\n", " self.setup_clean_chain = True\n", " 
self.num_nodes = 3\n", "\n", " def setup_network(self):\n", " self.nodes = []\n", " self.nodes.append(start_node(0, self.options.tmpdir, [\"-walletprematurewitness\", \"-rpcserialversion=0\"]))\n", " self.nodes.append(start_node(1, self.options.tmpdir, [\"-blockversion=4\", \"-promiscuousmempoolflags=517\", \"-prematurewitness\", \"-walletprematurewitness\", \"-rpcserialversion=1\"]))\n", " self.nodes.append(start_node(2, self.options.tmpdir, [\"-blockversion=536870915\", \"-promiscuousmempoolflags=517\", \"-prematurewitness\", \"-walletprematurewitness\"]))\n", " connect_nodes(self.nodes[1], 0)\n", " connect_nodes(self.nodes[2], 1)\n", " connect_nodes(self.nodes[0], 2)\n", " self.is_network_split = False\n", " self.sync_all()\n", "\n", " def success_mine(self, node, txid, sign, redeem_script=\"\"):\n", " send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal(\"49.998\"), sign, redeem_script)\n", " block = node.generate(1)\n", " assert_equal(len(node.getblock(block[0])[\"tx\"]), 2)\n", " sync_blocks(self.nodes)\n", "\n", " def skip_mine(self, node, txid, sign, redeem_script=\"\"):\n", " send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal(\"49.998\"), sign, redeem_script)\n", " block = node.generate(1)\n", " assert_equal(len(node.getblock(block[0])[\"tx\"]), 1)\n", " sync_blocks(self.nodes)\n", "\n", " def fail_accept(self, node, error_msg, txid, sign, redeem_script=\"\"):\n", " assert_raises_jsonrpc(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal(\"49.998\"), sign, redeem_script)\n", "\n", " def fail_mine(self, node, txid, sign, redeem_script=\"\"):\n", " send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal(\"49.998\"), sign, redeem_script)\n", " assert_raises_jsonrpc(-1, \"CreateNewBlock: TestBlockValidity failed\", node.generate, 1)\n", " sync_blocks(self.nodes)\n", "\n", " def run_test(self):\n", " self.nodes[0].generate(161) #block 161\n", "\n", " self.log.info(\"Verify sigops are counted in GBT with pre-BIP141 rules before the fork\")\n", " txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)\n", " tmpl = self.nodes[0].getblocktemplate({})\n", " assert(tmpl['sizelimit'] == 1000000)\n", " assert('weightlimit' not in tmpl)\n", " assert(tmpl['sigoplimit'] == 20000)\n", " assert(tmpl['transactions'][0]['hash'] == txid)\n", " assert(tmpl['transactions'][0]['sigops'] == 2)\n", " tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})\n", " assert(tmpl['sizelimit'] == 1000000)\n", " assert('weightlimit' not in tmpl)\n", " assert(tmpl['sigoplimit'] == 20000)\n", " assert(tmpl['transactions'][0]['hash'] == txid)\n", " assert(tmpl['transactions'][0]['sigops'] == 2)\n", " self.nodes[0].generate(1) #block 162\n", "\n", " balance_presetup = self.nodes[0].getbalance()\n", " self.pubkey = []\n", " p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh\n", " wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness\n", " for i in range(3):\n", " newaddress = self.nodes[i].getnewaddress()\n", " self.pubkey.append(self.nodes[i].validateaddress(newaddress)[\"pubkey\"])\n", " multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])\n", " self.nodes[i].addwitnessaddress(newaddress)\n", " self.nodes[i].addwitnessaddress(multiaddress)\n", " p2sh_ids.append([])\n", " wit_ids.append([])\n", " for v in range(2):\n", " 
p2sh_ids[i].append([])\n", " wit_ids[i].append([])\n", "\n", " for i in range(5):\n", " for n in range(3):\n", " for v in range(2):\n", " wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal(\"49.999\")))\n", " p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal(\"49.999\")))\n", "\n", " self.nodes[0].generate(1) #block 163\n", " sync_blocks(self.nodes)\n", "\n", " # Make sure all nodes recognize the transactions as theirs\n", " assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal(\"49.999\") + 50)\n", " assert_equal(self.nodes[1].getbalance(), 20*Decimal(\"49.999\"))\n", " assert_equal(self.nodes[2].getbalance(), 20*Decimal(\"49.999\"))\n", "\n", " self.nodes[0].generate(260) #block 423\n", " sync_blocks(self.nodes)\n", "\n", " self.log.info(\"Verify default node can't accept any witness format txs before fork\")\n", " # unsigned, no scriptsig\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", wit_ids[NODE_0][WIT_V0][0], False)\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", wit_ids[NODE_0][WIT_V1][0], False)\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", p2sh_ids[NODE_0][WIT_V0][0], False)\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", p2sh_ids[NODE_0][WIT_V1][0], False)\n", " # unsigned with redeem script\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))\n", " self.fail_accept(self.nodes[0], \"mandatory-script-verify-flag\", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))\n", " # signed\n", " self.fail_accept(self.nodes[0], \"no-witness-yet\", wit_ids[NODE_0][WIT_V0][0], True)\n", " self.fail_accept(self.nodes[0], \"no-witness-yet\", wit_ids[NODE_0][WIT_V1][0], True)\n", " self.fail_accept(self.nodes[0], \"no-witness-yet\", p2sh_ids[NODE_0][WIT_V0][0], True)\n", " self.fail_accept(self.nodes[0], \"no-witness-yet\", p2sh_ids[NODE_0][WIT_V1][0], True)\n", "\n", " self.log.info(\"Verify witness txs are skipped for mining before the fork\")\n", " self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424\n", " self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425\n", " self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426\n", " self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427\n", "\n", " # TODO: An old node would see these txs without witnesses and be able to mine them\n", "\n", " self.log.info(\"Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork\")\n", " self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428\n", " self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429\n", "\n", " self.log.info(\"Verify unsigned p2sh witness txs without a redeem script are invalid\")\n", " self.fail_accept(self.nodes[2], \"mandatory-script-verify-flag\", p2sh_ids[NODE_2][WIT_V0][1], False)\n", " self.fail_accept(self.nodes[2], \"mandatory-script-verify-flag\", p2sh_ids[NODE_2][WIT_V1][1], False)\n", "\n", " self.log.info(\"Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork\")\n", " self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430\n", " 
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431\n", "\n", " self.log.info(\"Verify previous witness txs skipped for mining can now be mined\")\n", " assert_equal(len(self.nodes[2].getrawmempool()), 4)\n", " block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)\n", " sync_blocks(self.nodes)\n", " assert_equal(len(self.nodes[2].getrawmempool()), 0)\n", " segwit_tx_list = self.nodes[2].getblock(block[0])[\"tx\"]\n", " assert_equal(len(segwit_tx_list), 5)\n", "\n", " self.log.info(\"Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag\")\n", " assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))\n", " assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))\n", " for i in range(len(segwit_tx_list)):\n", " tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])[\"hex\"])\n", " assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))\n", " assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))\n", " assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])[\"hex\"])\n", " assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])[\"hex\"])\n", " assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))\n", "\n", " self.log.info(\"Verify witness txs without witness data are invalid after the fork\")\n", " self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)\n", " self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)\n", " self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))\n", " self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))\n", "\n", " self.log.info(\"Verify default node can now use witness txs\")\n", " self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432\n", " self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433\n", " self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434\n", " self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435\n", "\n", " self.log.info(\"Verify sigops are counted in GBT with BIP141 rules after the fork\")\n", " txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)\n", " tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})\n", " assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data\n", " assert(tmpl['weightlimit'] == 4000000)\n", " assert(tmpl['sigoplimit'] == 80000)\n", " assert(tmpl['transactions'][0]['txid'] == txid)\n", " assert(tmpl['transactions'][0]['sigops'] == 8)\n", "\n", " self.nodes[0].generate(1) # Mine a block to clear the gbt cache\n", "\n", " self.log.info(\"Non-segwit miners are able to use GBT response after activation.\")\n", " # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->\n", " # tx2 (segwit input, paying to a non-segwit output) ->\n", " # tx3 (non-segwit input, paying to a non-segwit output).\n", " # tx1 is allowed to appear in the block, but no others.\n", " txid1 = send_to_witness(1, 
self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal(\"49.996\"))\n", " hex_tx = self.nodes[0].gettransaction(txid)['hex']\n", " tx = FromHex(CTransaction(), hex_tx)\n", " assert(tx.wit.is_null()) # This should not be a segwit input\n", " assert(txid1 in self.nodes[0].getrawmempool())\n", "\n", " # Now create tx2, which will spend from txid1.\n", " tx = CTransaction()\n", " tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))\n", " tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))\n", " tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']\n", " txid2 = self.nodes[0].sendrawtransaction(tx2_hex)\n", " tx = FromHex(CTransaction(), tx2_hex)\n", " assert(not tx.wit.is_null())\n", "\n", " # Now create tx3, which will spend from txid2\n", " tx = CTransaction()\n", " tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b\"\"))\n", " tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee\n", " tx.calc_sha256()\n", " txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))\n", " assert(tx.wit.is_null())\n", " assert(txid3 in self.nodes[0].getrawmempool())\n", "\n", " # Now try calling getblocktemplate() without segwit support.\n", " template = self.nodes[0].getblocktemplate()\n", "\n", " # Check that tx1 is the only transaction of the 3 in the template.\n", " template_txids = [ t['txid'] for t in template['transactions'] ]\n", " assert(txid2 not in template_txids and txid3 not in template_txids)\n", " assert(txid1 in template_txids)\n", "\n", " # Check that running with segwit support results in all 3 being included.\n", " template = self.nodes[0].getblocktemplate({\"rules\": [\"segwit\"]})\n", " template_txids = [ t['txid'] for t in template['transactions'] ]\n", " assert(txid1 in template_txids)\n", " assert(txid2 in template_txids)\n", " assert(txid3 in template_txids)\n", "\n", " # Mine a block to clear the gbt cache again.\n", " self.nodes[0].generate(1)\n", "\n", " self.log.info(\"Verify behaviour of importaddress, addwitnessaddress and listunspent\")\n", "\n", " # Some public keys to be used later\n", " pubkeys = [\n", " \"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242\", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb\n", " \"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF\", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97\n", " \"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E\", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV\n", " \"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538\", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd\n", " \"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228\", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66\n", " \"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC\", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K\n", " \"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84\", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ\n", " ]\n", "\n", " # Import a compressed key and an uncompressed key, generate some multisig addresses\n", " self.nodes[0].importprivkey(\"92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn\")\n", " uncompressed_spendable_address = [\"mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu\"]\n", " self.nodes[0].importprivkey(\"cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR\")\n", " 
compressed_spendable_address = [\"mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe\"]\n", " assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))\n", " assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))\n", "\n", " self.nodes[0].importpubkey(pubkeys[0])\n", " compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]\n", " self.nodes[0].importpubkey(pubkeys[1])\n", " compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))\n", " self.nodes[0].importpubkey(pubkeys[2])\n", " uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]\n", "\n", " spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress\n", " spendable_after_importaddress = [] # These outputs should be seen after importaddress\n", " solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable\n", " unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress\n", " solvable_anytime = [] # These outputs should be solvable after importpubkey\n", " unseen_anytime = [] # These outputs should never be seen\n", "\n", " uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))\n", " uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))\n", " compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))\n", " uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))\n", " compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))\n", " compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))\n", " unknown_address = [\"mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT\", \"2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx\"]\n", "\n", " # Test multisig_without_privkey\n", " # We have 2 public keys without private keys, use addmultisigaddress to add to wallet.\n", " # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.\n", "\n", " multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])\n", " script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])\n", " solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))\n", "\n", " for i in compressed_spendable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " # bare and p2sh multisig with compressed keys should always be spendable\n", " spendable_anytime.extend([bare, p2sh])\n", " # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress\n", " spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # normal P2PKH and P2PK with compressed keys should always be spendable\n", " spendable_anytime.extend([p2pkh, p2pk])\n", " # P2SH_P2PK, 
P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress\n", " spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])\n", "\n", " for i in uncompressed_spendable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " # bare and p2sh multisig with uncompressed keys should always be spendable\n", " spendable_anytime.extend([bare, p2sh])\n", " # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wsh, p2sh_p2wsh])\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # normal P2PKH and P2PK with uncompressed keys should always be spendable\n", " spendable_anytime.extend([p2pkh, p2pk])\n", " # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress\n", " spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])\n", " # witness with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])\n", "\n", " for i in compressed_solvable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " # Multisig without private is not seen after addmultisigaddress, but seen after importaddress\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # normal P2PKH and P2PK with compressed keys should always be seen\n", " solvable_anytime.extend([p2pkh, p2pk])\n", " # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress\n", " solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])\n", "\n", " for i in uncompressed_solvable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress\n", " solvable_after_importaddress.extend([bare, p2sh])\n", " # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wsh, p2sh_p2wsh])\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # normal P2PKH and P2PK with uncompressed keys should always be seen\n", " solvable_anytime.extend([p2pkh, p2pk])\n", " # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress\n", " solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])\n", " # witness with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])\n", "\n", " op1 = CScript([OP_1])\n", " op0 = CScript([OP_0])\n", " # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V\n", " unsolvable_address = [\"mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V\", 
\"2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe\", script_to_p2sh(op1), script_to_p2sh(op0)]\n", " unsolvable_address_key = hex_str_to_bytes(\"02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D\")\n", " unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])\n", " unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])\n", " p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])\n", " p2wshop1 = CScript([OP_0, sha256(op1)])\n", " unsolvable_after_importaddress.append(unsolvablep2pkh)\n", " unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)\n", " unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script\n", " unsolvable_after_importaddress.append(p2wshop1)\n", " unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided\n", " unsolvable_after_importaddress.append(p2shop0)\n", "\n", " spendable_txid = []\n", " solvable_txid = []\n", " spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))\n", " solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))\n", " self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)\n", "\n", " importlist = []\n", " for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " bare = hex_str_to_bytes(v['hex'])\n", " importlist.append(bytes_to_hex_str(bare))\n", " importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))\n", " else:\n", " pubkey = hex_str_to_bytes(v['pubkey'])\n", " p2pk = CScript([pubkey, OP_CHECKSIG])\n", " p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])\n", " importlist.append(bytes_to_hex_str(p2pk))\n", " importlist.append(bytes_to_hex_str(p2pkh))\n", " importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))\n", " importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))\n", " importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))\n", "\n", " importlist.append(bytes_to_hex_str(unsolvablep2pkh))\n", " importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))\n", " importlist.append(bytes_to_hex_str(op1))\n", " importlist.append(bytes_to_hex_str(p2wshop1))\n", "\n", " for i in importlist:\n", " # import all generated addresses. 
The wallet already has the private keys for some of these, so catch JSON RPC\n", " # exceptions and continue.\n", " try:\n", " self.nodes[0].importaddress(i,\"\",False,True)\n", " except JSONRPCException as exp:\n", " assert_equal(exp.error[\"message\"], \"The wallet already contains the private key for this address or script\")\n", " assert_equal(exp.error[\"code\"], -4)\n", "\n", " self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only\n", " self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey\n", "\n", " spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))\n", " solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))\n", " self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)\n", " self.mine_and_test_listunspent(unseen_anytime, 0)\n", "\n", " # addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is\n", " # not in the wallet\n", " # note that no witness address should be returned by unsolvable addresses\n", " # the multisig_without_privkey_address will fail because its keys were not added with importpubkey\n", " for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:\n", " assert_raises_jsonrpc(-4, \"Public key or redeemscript not known to wallet, or the key is uncompressed\", self.nodes[0].addwitnessaddress, i)\n", "\n", " for i in compressed_spendable_address + compressed_solvable_address:\n", " witaddress = self.nodes[0].addwitnessaddress(i)\n", " # addwitnessaddress should return the same address if it is a known P2SH-witness address\n", " assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))\n", "\n", " spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))\n", " solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))\n", " self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)\n", " self.mine_and_test_listunspent(unseen_anytime, 0)\n", "\n", " # Repeat some tests. 
This time we don't add witness scripts with importaddress\n", " # Import a compressed key and an uncompressed key, generate some multisig addresses\n", " self.nodes[0].importprivkey(\"927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH\")\n", " uncompressed_spendable_address = [\"mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi\"]\n", " self.nodes[0].importprivkey(\"cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw\")\n", " compressed_spendable_address = [\"n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL\"]\n", "\n", " self.nodes[0].importpubkey(pubkeys[5])\n", " compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]\n", " self.nodes[0].importpubkey(pubkeys[6])\n", " uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]\n", "\n", " spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress\n", " solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable\n", " unseen_anytime = [] # These outputs should never be seen\n", "\n", " uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))\n", " uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))\n", " compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))\n", " uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))\n", " compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))\n", "\n", " premature_witaddress = []\n", "\n", " for i in compressed_spendable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress\n", " spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])\n", " premature_witaddress.append(script_to_p2sh(p2wsh))\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress\n", " spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])\n", " premature_witaddress.append(script_to_p2sh(p2wpkh))\n", "\n", " for i in uncompressed_spendable_address + uncompressed_solvable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wsh, p2sh_p2wsh])\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen\n", " unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])\n", "\n", " for i in compressed_solvable_address:\n", " v = self.nodes[0].validateaddress(i)\n", " if (v['isscript']):\n", " # P2WSH multisig without private key are seen after addwitnessaddress\n", " [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)\n", " solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])\n", " 
premature_witaddress.append(script_to_p2sh(p2wsh))\n", " else:\n", " [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)\n", " # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after addwitnessaddress\n", " solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])\n", " premature_witaddress.append(script_to_p2sh(p2wpkh))\n", "\n", " self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)\n", "\n", " # addwitnessaddress should refuse to return a witness address if an uncompressed key is used\n", " # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress\n", " # premature_witaddress are not accepted until the script is added with addwitnessaddress first\n", " for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:\n", " # This will raise an exception\n", " assert_raises_jsonrpc(-4, \"Public key or redeemscript not known to wallet, or the key is uncompressed\", self.nodes[0].addwitnessaddress, i)\n", "\n", " # after importaddress it should pass addwitnessaddress\n", " v = self.nodes[0].validateaddress(compressed_solvable_address[1])\n", " self.nodes[0].importaddress(v['hex'],\"\",False,True)\n", " for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:\n", " witaddress = self.nodes[0].addwitnessaddress(i)\n", " assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))\n", "\n", " spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))\n", " solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))\n", " self.mine_and_test_listunspent(unseen_anytime, 0)\n", "\n", " # Check that spendable outputs are really spendable\n", " self.create_and_mine_tx_from_txids(spendable_txid)\n", "\n", " # import all the private keys so solvable addresses become spendable\n", " self.nodes[0].importprivkey(\"cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb\")\n", " self.nodes[0].importprivkey(\"cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97\")\n", " self.nodes[0].importprivkey(\"91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV\")\n", " self.nodes[0].importprivkey(\"cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd\")\n", " self.nodes[0].importprivkey(\"cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66\")\n", " self.nodes[0].importprivkey(\"cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K\")\n", " self.create_and_mine_tx_from_txids(solvable_txid)\n", "\n", " def mine_and_test_listunspent(self, script_list, ismine):\n", " utxo = find_unspent(self.nodes[0], 50)\n", " tx = CTransaction()\n", " tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))\n", " for i in script_list:\n", " tx.vout.append(CTxOut(10000000, i))\n", " tx.rehash()\n", " signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']\n", " txid = self.nodes[0].sendrawtransaction(signresults, True)\n", " self.nodes[0].generate(1)\n", " sync_blocks(self.nodes)\n", " watchcount = 0\n", " spendcount = 0\n", " for i in self.nodes[0].listunspent():\n", " if (i['txid'] == txid):\n", " watchcount += 1\n", " if (i['spendable'] == True):\n", " spendcount += 1\n", " if (ismine == 2):\n", " assert_equal(spendcount, len(script_list))\n", " elif (ismine == 1):\n", " 
assert_equal(watchcount, len(script_list))\n", " assert_equal(spendcount, 0)\n", " else:\n", " assert_equal(watchcount, 0)\n", " return txid\n", "\n", " def p2sh_address_to_script(self,v):\n", " bare = CScript(hex_str_to_bytes(v['hex']))\n", " p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))\n", " p2wsh = CScript([OP_0, sha256(bare)])\n", " p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])\n", " return([bare, p2sh, p2wsh, p2sh_p2wsh])\n", "\n", " def p2pkh_address_to_script(self,v):\n", " pubkey = hex_str_to_bytes(v['pubkey'])\n", " p2wpkh = CScript([OP_0, hash160(pubkey)])\n", " p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])\n", " p2pk = CScript([pubkey, OP_CHECKSIG])\n", " p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))\n", " p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])\n", " p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])\n", " p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])\n", " p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])\n", " p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])\n", " p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])\n", " return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]\n", "\n", " def create_and_mine_tx_from_txids(self, txids, success = True):\n", " tx = CTransaction()\n", " for i in txids:\n", " txtmp = CTransaction()\n", " txraw = self.nodes[0].getrawtransaction(i)\n", " f = BytesIO(hex_str_to_bytes(txraw))\n", " txtmp.deserialize(f)\n", " for j in range(len(txtmp.vout)):\n", " tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))\n", " tx.vout.append(CTxOut(0, CScript()))\n", " tx.rehash()\n", " signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']\n", " self.nodes[0].sendrawtransaction(signresults, True)\n", " self.nodes[0].generate(1)\n", " sync_blocks(self.nodes)\n", "\n", "\n", "if __name__ == '__main__':\n", " SegWitTest().main()\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0.0064516129032258064, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0.034482758620689655, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0.0136986301369863, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01834862385321101, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0, 0, 0.02857142857142857, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0.008771929824561403, 0.005376344086021506, 0.005847953216374269, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0.006666666666666667, 0, 0, 0.009009009009009009, 0.010416666666666666, 0, 0, 0, 0.0425531914893617, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0.044444444444444446, 0, 0, 0, 0.013333333333333334, 0.013513513513513514, 0, 0, 0.011904761904761904, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.006622516556291391, 0.006622516556291391, 0, 0.044444444444444446, 0, 0, 0, 0.009708737864077669, 0, 0, 0, 0.0425531914893617, 0, 0, 0.010752688172043012, 0, 0.009345794392523364, 0.009345794392523364, 0.009259259259259259, 0.009259259259259259, 0, 0.006802721088435374, 0.00684931506849315, 0, 0.010869565217391304, 0.010869565217391304, 0.010752688172043012, 0.010752688172043012, 0, 0.012048192771084338, 0.03614457831325301, 0.03614457831325301, 0.03571428571428571, 0.03571428571428571, 0, 0.01098901098901099, 0, 0.008771929824561403, 0.034482758620689655, 0.034482758620689655, 0, 0.010638297872340425, 0.009259259259259259, 0.009259259259259259, 0, 0.007352941176470588, 0.023622047244094488, 0.023809523809523808, 0, 0.011235955056179775, 0, 0.030927835051546393, 0, 0, 0, 0, 0, 0.0070921985815602835, 0.02, 0.02, 0, 0.010309278350515464, 0.008, 0.0078125, 0.007751937984496124, 0.007751937984496124, 0.008130081300813009, 0, 0.010869565217391304, 0, 0, 0.008849557522123894, 0.008928571428571428, 0, 0, 0.03488372093023256, 0.03488372093023256, 0.034482758620689655, 0.034482758620689655, 0, 0.01098901098901099, 0, 0.014705882352941176, 0.008403361344537815, 0, 0, 0, 0, 0, 0.013888888888888888, 0, 0.011111111111111112, 0.011904761904761904, 0.011904761904761904, 0.011627906976744186, 0, 0.008, 0, 0, 0.014492753623188406, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012658227848101266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0273972602739726, 0, 0, 0, 0.012195121951219513, 0, 0.0273972602739726, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0.014598540145985401, 0.014598540145985401, 0.01, 0.014598540145985401, 0.014598540145985401, 0.014598540145985401, 0.01, 0, 0, 0.010869565217391304, 0.01098901098901099, 0, 0.010869565217391304, 0, 0.01834862385321101, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0.007462686567164179, 0.009708737864077669, 0.008264462809917356, 0.009174311926605505, 0.009433962264150943, 0.011235955056179775, 0, 0.006535947712418301, 0.0064516129032258064, 0.006711409395973154, 0.006622516556291391, 0.006802721088435374, 0.00684931506849315, 0.009615384615384616, 0, 0, 0.010526315789473684, 0.008403361344537815, 0, 0.009523809523809525, 0.008547008547008548, 0.010638297872340425, 0, 0, 0, 0, 0.012345679012345678, 0.011235955056179775, 0, 0.009009009009009009, 0, 0, 0.005952380952380952, 0.011363636363636364, 0, 0.008695652173913044, 0.006289308176100629, 0, 0, 0, 0, 0.012345679012345678, 0.01098901098901099, 0, 0.011494252873563218, 0, 0, 0.005952380952380952, 0.011111111111111112, 0, 0.011904761904761904, 0, 0, 
0.008264462809917356, 0, 0, 0, 0, 0.00909090909090909, 0.012345679012345678, 0.011764705882352941, 0, 0.005952380952380952, 0.012048192771084338, 0, 0.00909090909090909, 0.006329113924050633, 0, 0, 0, 0, 0.012345679012345678, 0.0078125, 0, 0.011494252873563218, 0, 0, 0.005952380952380952, 0.011764705882352941, 0, 0.010101010101010102, 0, 0, 0.008264462809917356, 0, 0, 0, 0.009009009009009009, 0.006711409395973154, 0.008333333333333333, 0.00847457627118644, 0, 0, 0, 0, 0, 0.023529411764705882, 0, 0.020202020202020204, 0, 0, 0, 0, 0.011904761904761904, 0.012195121951219513, 0.006493506493506494, 0, 0, 0.007042253521126761, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0.01, 0, 0, 0.011627906976744186, 0.012048192771084338, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0.008130081300813009, 0, 0, 0.04918032786885246, 0, 0.008, 0, 0, 0.022988505747126436, 0.0196078431372549, 0, 0.008620689655172414, 0.008849557522123894, 0, 0, 0, 0.008403361344537815, 0, 0.012195121951219513, 0.009345794392523364, 0.006369426751592357, 0.006578947368421052, 0, 0, 0, 0.009900990099009901, 0.012195121951219513, 0, 0.008620689655172414, 0.008849557522123894, 0, 0, 0, 0.011494252873563218, 0.010869565217391304, 0.01098901098901099, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0.009708737864077669, 0.01652892561983471, 0.011235955056179775, 0, 0.006535947712418301, 0.0064516129032258064, 0.006711409395973154, 0.006666666666666667, 0.006802721088435374, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0.009259259259259259, 0, 0, 0, 0.005952380952380952, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.012345679012345678, 0.011494252873563218, 0, 0, 0.005952380952380952, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0.012345679012345678, 0, 0, 0, 0.005952380952380952, 0.010638297872340425, 0, 0, 0, 0.007751937984496124, 0, 0.009900990099009901, 0.008130081300813009, 0.009708737864077669, 0.007194244604316547, 0, 0.006578947368421052, 0, 0, 0, 0.05, 0.01, 0, 0.012195121951219513, 0, 0.01, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0.010869565217391304, 0.01098901098901099, 0.010869565217391304, 0.010869565217391304, 0.010869565217391304, 0, 0, 0, 0, 0, 0.0125, 0, 0, 0, 0.008928571428571428, 0, 0, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.025, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0.007518796992481203, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0, 0, 0.015151515151515152, 0, 0, 0.008928571428571428, 0, 0, 0, 0, 0, 0, 0 ]
641
0.00483
""" This will make some function approximators that we can use, particularly: linear and neural network value functions. Instantiate instances of these in other pieces of the code base. (c) April 2017 by Daniel Seita, built upon `starter code` from John Schulman. """ import numpy as np import tensorflow as tf import tensorflow.contrib.distributions as distr import sys if "../" not in sys.path: sys.path.append("../") from utils import utils_pg as utils np.set_printoptions(edgeitems=100) class LinearValueFunction(object): """ Estimates the baseline function for PGs via ridge regression. """ coef = None def fit(self, X, y): """ Updates weights (self.coef) with design matrix X (i.e. observations) and targets (i.e. actual returns) y. """ assert X.shape[0] == y.shape[0] assert len(y.shape) == 1 Xp = self.preproc(X) A = Xp.T.dot(Xp) nfeats = Xp.shape[1] A[np.arange(nfeats), np.arange(nfeats)] += 1e-3 # a little ridge regression b = Xp.T.dot(y) self.coef = np.linalg.solve(A, b) def predict(self, X): """ Predicts return from observations (i.e. environment states) X. """ if self.coef is None: return np.zeros(X.shape[0]) else: return self.preproc(X).dot(self.coef) def preproc(self, X): """ Adding a bias column, and also adding squared values (huh). """ return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1) class NnValueFunction(object): """ Estimates the baseline function for PGs via neural network. """ def __init__(self, session, ob_dim=None, n_epochs=10, stepsize=1e-3): """ They provide us with an ob_dim in the code so I assume we can use it; makes it easy to define the layers anyway. This gets constructed upon initialization so future calls to self.fit should remember this. I actually use the pre-processed version, though. """ self.n_epochs = n_epochs self.lrate = stepsize self.sy_ytarg = tf.placeholder(shape=[None], name="nnvf_y", dtype=tf.float32) self.sy_ob_no = tf.placeholder(shape=[None, ob_dim+1], name="nnvf_ob", dtype=tf.float32) self.sy_h1 = utils.lrelu(utils.dense(self.sy_ob_no, 32, "nnvf_h1", weight_init=utils.normc_initializer(1.0)), leak=0.0) self.sy_h2 = utils.lrelu(utils.dense(self.sy_h1, 32, "nnvf_h2", weight_init=utils.normc_initializer(1.0)), leak=0.0) self.sy_final_n = utils.dense(self.sy_h2, 1, "nnvf_final", weight_init=utils.normc_initializer(1.0)) self.sy_ypred = tf.reshape(self.sy_final_n, [-1]) self.sy_l2_error = tf.reduce_mean(tf.square(self.sy_ypred - self.sy_ytarg)) self.fit_op = tf.train.AdamOptimizer(stepsize).minimize(self.sy_l2_error) self.sess = session def fit(self, X, y): """ Updates weights (self.coef) with design matrix X (i.e. observations) and targets (i.e. actual returns) y. NOTE! We now return a dictionary `out` so that we can provide information relevant information for the logger. """ assert X.shape[0] == y.shape[0] assert len(y.shape) == 1 out = {} out["PredStdevBefore"]= self.predict(X).std() Xp = self.preproc(X) for i in range(self.n_epochs): _,err = self.sess.run( [self.fit_op, self.sy_l2_error], feed_dict={self.sy_ob_no: Xp, self.sy_ytarg: y }) if i == 0: out["MSEBefore"] = np.sqrt(err) if i == self.n_epochs-1: out["MSEAfter"] = np.sqrt(err) out["PredStdevAfter"] = self.predict(X).std() out["TargStdev"] = y.std() return out def predict(self, X): """ Predicts returns from observations (i.e. environment states) X. I also think we need a session here. No need to expand dimensions, BTW! It's effectively already done for us elsewhere. 
""" Xp = self.preproc(X) return self.sess.run(self.sy_ypred, feed_dict={self.sy_ob_no:Xp}) def preproc(self, X): """ Let's add this here to increase dimensionality. """ #return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1) return np.concatenate([np.ones([X.shape[0], 1]), X], axis=1)
[ "\"\"\"\n", "This will make some function approximators that we can use, particularly: linear\n", "and neural network value functions. Instantiate instances of these in other\n", "pieces of the code base.\n", "\n", "(c) April 2017 by Daniel Seita, built upon `starter code` from John Schulman.\n", "\"\"\"\n", "\n", "import numpy as np\n", "import tensorflow as tf\n", "import tensorflow.contrib.distributions as distr\n", "import sys\n", "if \"../\" not in sys.path:\n", " sys.path.append(\"../\")\n", "from utils import utils_pg as utils\n", "np.set_printoptions(edgeitems=100)\n", "\n", "\n", "class LinearValueFunction(object):\n", " \"\"\" Estimates the baseline function for PGs via ridge regression. \"\"\"\n", " coef = None\n", "\n", " def fit(self, X, y):\n", " \"\"\" \n", " Updates weights (self.coef) with design matrix X (i.e. observations) and\n", " targets (i.e. actual returns) y. \n", " \"\"\"\n", " assert X.shape[0] == y.shape[0]\n", " assert len(y.shape) == 1\n", " Xp = self.preproc(X)\n", " A = Xp.T.dot(Xp)\n", " nfeats = Xp.shape[1]\n", " A[np.arange(nfeats), np.arange(nfeats)] += 1e-3 # a little ridge regression\n", " b = Xp.T.dot(y)\n", " self.coef = np.linalg.solve(A, b)\n", "\n", " def predict(self, X):\n", " \"\"\" Predicts return from observations (i.e. environment states) X. \"\"\"\n", " if self.coef is None:\n", " return np.zeros(X.shape[0])\n", " else:\n", " return self.preproc(X).dot(self.coef)\n", "\n", " def preproc(self, X):\n", " \"\"\" Adding a bias column, and also adding squared values (huh). \"\"\"\n", " return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1)\n", "\n", "\n", "class NnValueFunction(object):\n", " \"\"\" Estimates the baseline function for PGs via neural network. \"\"\"\n", "\n", " def __init__(self, session, ob_dim=None, n_epochs=10, stepsize=1e-3):\n", " \"\"\" \n", " They provide us with an ob_dim in the code so I assume we can use it;\n", " makes it easy to define the layers anyway. This gets constructed upon\n", " initialization so future calls to self.fit should remember this. I\n", " actually use the pre-processed version, though.\n", " \"\"\"\n", " self.n_epochs = n_epochs\n", " self.lrate = stepsize\n", " self.sy_ytarg = tf.placeholder(shape=[None], name=\"nnvf_y\", dtype=tf.float32)\n", " self.sy_ob_no = tf.placeholder(shape=[None, ob_dim+1], name=\"nnvf_ob\", dtype=tf.float32)\n", " self.sy_h1 = utils.lrelu(utils.dense(self.sy_ob_no, 32, \"nnvf_h1\", weight_init=utils.normc_initializer(1.0)), leak=0.0)\n", " self.sy_h2 = utils.lrelu(utils.dense(self.sy_h1, 32, \"nnvf_h2\", weight_init=utils.normc_initializer(1.0)), leak=0.0)\n", " self.sy_final_n = utils.dense(self.sy_h2, 1, \"nnvf_final\", weight_init=utils.normc_initializer(1.0))\n", " self.sy_ypred = tf.reshape(self.sy_final_n, [-1])\n", " self.sy_l2_error = tf.reduce_mean(tf.square(self.sy_ypred - self.sy_ytarg))\n", " self.fit_op = tf.train.AdamOptimizer(stepsize).minimize(self.sy_l2_error)\n", " self.sess = session\n", "\n", " def fit(self, X, y):\n", " \"\"\" Updates weights (self.coef) with design matrix X (i.e. observations)\n", " and targets (i.e. actual returns) y. NOTE! 
We now return a dictionary\n", " `out` so that we can provide information relevant information for the\n", " logger.\n", " \"\"\"\n", " assert X.shape[0] == y.shape[0]\n", " assert len(y.shape) == 1\n", " out = {}\n", " out[\"PredStdevBefore\"]= self.predict(X).std()\n", "\n", " Xp = self.preproc(X)\n", " for i in range(self.n_epochs):\n", " _,err = self.sess.run(\n", " [self.fit_op, self.sy_l2_error], \n", " feed_dict={self.sy_ob_no: Xp,\n", " self.sy_ytarg: y\n", " })\n", " if i == 0:\n", " out[\"MSEBefore\"] = np.sqrt(err)\n", " if i == self.n_epochs-1:\n", " out[\"MSEAfter\"] = np.sqrt(err)\n", "\n", " out[\"PredStdevAfter\"] = self.predict(X).std()\n", " out[\"TargStdev\"] = y.std()\n", " return out\n", "\n", " def predict(self, X):\n", " \"\"\" \n", " Predicts returns from observations (i.e. environment states) X. I also\n", " think we need a session here. No need to expand dimensions, BTW! It's\n", " effectively already done for us elsewhere.\n", " \"\"\"\n", " Xp = self.preproc(X)\n", " return self.sess.run(self.sy_ypred, feed_dict={self.sy_ob_no:Xp})\n", "\n", " def preproc(self, X):\n", " \"\"\" Let's add this here to increase dimensionality. \"\"\"\n", " #return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1)\n", " return np.concatenate([np.ones([X.shape[0], 1]), X], axis=1)\n" ]
[ 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.012345679012345678, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0.027777777777777776, 0.027777777777777776, 0.02247191011235955, 0.02, 0.014925373134328358, 0.015267175572519083, 0.01818181818181818, 0.01639344262295082, 0.011904761904761904, 0.022988505747126436, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517, 0, 0, 0, 0.02857142857142857, 0.018518518518518517, 0, 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0.013513513513513514, 0, 0, 0, 0.022727272727272728, 0 ]
110
0.00609
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/home/grossmj/PycharmProjects/gns3-gui/gns3/modules/virtualbox/ui/virtualbox_preferences_page.ui'
#
# Created: Mon Mar 9 18:00:29 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)

class Ui_VirtualBoxPreferencesPageWidget(object):
    def setupUi(self, VirtualBoxPreferencesPageWidget):
        VirtualBoxPreferencesPageWidget.setObjectName(_fromUtf8("VirtualBoxPreferencesPageWidget"))
        VirtualBoxPreferencesPageWidget.resize(430, 490)
        self.verticalLayout_2 = QtGui.QVBoxLayout(VirtualBoxPreferencesPageWidget)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.uiTabWidget = QtGui.QTabWidget(VirtualBoxPreferencesPageWidget)
        self.uiTabWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.uiTabWidget.setObjectName(_fromUtf8("uiTabWidget"))
        self.uiGeneralSettingsTabWidget = QtGui.QWidget()
        self.uiGeneralSettingsTabWidget.setObjectName(_fromUtf8("uiGeneralSettingsTabWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.uiGeneralSettingsTabWidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.uiUseLocalServercheckBox = QtGui.QCheckBox(self.uiGeneralSettingsTabWidget)
        self.uiUseLocalServercheckBox.setChecked(True)
        self.uiUseLocalServercheckBox.setObjectName(_fromUtf8("uiUseLocalServercheckBox"))
        self.verticalLayout.addWidget(self.uiUseLocalServercheckBox)
        self.uiVboxManagePathLabel = QtGui.QLabel(self.uiGeneralSettingsTabWidget)
        self.uiVboxManagePathLabel.setObjectName(_fromUtf8("uiVboxManagePathLabel"))
        self.verticalLayout.addWidget(self.uiVboxManagePathLabel)
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.uiVboxManagePathLineEdit = QtGui.QLineEdit(self.uiGeneralSettingsTabWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.uiVboxManagePathLineEdit.sizePolicy().hasHeightForWidth())
        self.uiVboxManagePathLineEdit.setSizePolicy(sizePolicy)
        self.uiVboxManagePathLineEdit.setObjectName(_fromUtf8("uiVboxManagePathLineEdit"))
        self.horizontalLayout_5.addWidget(self.uiVboxManagePathLineEdit)
        self.uiVboxManagePathToolButton = QtGui.QToolButton(self.uiGeneralSettingsTabWidget)
        self.uiVboxManagePathToolButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
        self.uiVboxManagePathToolButton.setObjectName(_fromUtf8("uiVboxManagePathToolButton"))
        self.horizontalLayout_5.addWidget(self.uiVboxManagePathToolButton)
        self.verticalLayout.addLayout(self.horizontalLayout_5)
        self.uiVboxManageUserLabel = QtGui.QLabel(self.uiGeneralSettingsTabWidget)
        self.uiVboxManageUserLabel.setObjectName(_fromUtf8("uiVboxManageUserLabel"))
        self.verticalLayout.addWidget(self.uiVboxManageUserLabel)
        self.uiVboxManageUserLineEdit = QtGui.QLineEdit(self.uiGeneralSettingsTabWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.uiVboxManageUserLineEdit.sizePolicy().hasHeightForWidth())
        self.uiVboxManageUserLineEdit.setSizePolicy(sizePolicy)
        self.uiVboxManageUserLineEdit.setObjectName(_fromUtf8("uiVboxManageUserLineEdit"))
        self.verticalLayout.addWidget(self.uiVboxManageUserLineEdit)
        spacerItem = QtGui.QSpacerItem(390, 193, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.uiTabWidget.addTab(self.uiGeneralSettingsTabWidget, _fromUtf8(""))
        self.verticalLayout_2.addWidget(self.uiTabWidget)
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        spacerItem1 = QtGui.QSpacerItem(164, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.uiRestoreDefaultsPushButton = QtGui.QPushButton(VirtualBoxPreferencesPageWidget)
        self.uiRestoreDefaultsPushButton.setObjectName(_fromUtf8("uiRestoreDefaultsPushButton"))
        self.horizontalLayout_2.addWidget(self.uiRestoreDefaultsPushButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)

        self.retranslateUi(VirtualBoxPreferencesPageWidget)
        self.uiTabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(VirtualBoxPreferencesPageWidget)

    def retranslateUi(self, VirtualBoxPreferencesPageWidget):
        VirtualBoxPreferencesPageWidget.setWindowTitle(_translate("VirtualBoxPreferencesPageWidget", "VirtualBox", None))
        self.uiUseLocalServercheckBox.setText(_translate("VirtualBoxPreferencesPageWidget", "Use the local server", None))
        self.uiVboxManagePathLabel.setText(_translate("VirtualBoxPreferencesPageWidget", "Path to VBoxManage:", None))
        self.uiVboxManagePathToolButton.setText(_translate("VirtualBoxPreferencesPageWidget", "&Browse...", None))
        self.uiVboxManageUserLabel.setText(_translate("VirtualBoxPreferencesPageWidget", "Run VirtualBox as another user (GNS3 running as root):", None))
        self.uiTabWidget.setTabText(self.uiTabWidget.indexOf(self.uiGeneralSettingsTabWidget), _translate("VirtualBoxPreferencesPageWidget", "General settings", None))
        self.uiRestoreDefaultsPushButton.setText(_translate("VirtualBoxPreferencesPageWidget", "Restore defaults", None))
[ "# -*- coding: utf-8 -*-\n", "\n", "# Form implementation generated from reading ui file '/home/grossmj/PycharmProjects/gns3-gui/gns3/modules/virtualbox/ui/virtualbox_preferences_page.ui'\n", "#\n", "# Created: Mon Mar 9 18:00:29 2015\n", "# by: PyQt4 UI code generator 4.10.4\n", "#\n", "# WARNING! All changes made in this file will be lost!\n", "\n", "from PyQt4 import QtCore, QtGui\n", "\n", "try:\n", " _fromUtf8 = QtCore.QString.fromUtf8\n", "except AttributeError:\n", " def _fromUtf8(s):\n", " return s\n", "\n", "try:\n", " _encoding = QtGui.QApplication.UnicodeUTF8\n", " def _translate(context, text, disambig):\n", " return QtGui.QApplication.translate(context, text, disambig, _encoding)\n", "except AttributeError:\n", " def _translate(context, text, disambig):\n", " return QtGui.QApplication.translate(context, text, disambig)\n", "\n", "class Ui_VirtualBoxPreferencesPageWidget(object):\n", " def setupUi(self, VirtualBoxPreferencesPageWidget):\n", " VirtualBoxPreferencesPageWidget.setObjectName(_fromUtf8(\"VirtualBoxPreferencesPageWidget\"))\n", " VirtualBoxPreferencesPageWidget.resize(430, 490)\n", " self.verticalLayout_2 = QtGui.QVBoxLayout(VirtualBoxPreferencesPageWidget)\n", " self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n", " self.uiTabWidget = QtGui.QTabWidget(VirtualBoxPreferencesPageWidget)\n", " self.uiTabWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n", " self.uiTabWidget.setObjectName(_fromUtf8(\"uiTabWidget\"))\n", " self.uiGeneralSettingsTabWidget = QtGui.QWidget()\n", " self.uiGeneralSettingsTabWidget.setObjectName(_fromUtf8(\"uiGeneralSettingsTabWidget\"))\n", " self.verticalLayout = QtGui.QVBoxLayout(self.uiGeneralSettingsTabWidget)\n", " self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n", " self.uiUseLocalServercheckBox = QtGui.QCheckBox(self.uiGeneralSettingsTabWidget)\n", " self.uiUseLocalServercheckBox.setChecked(True)\n", " self.uiUseLocalServercheckBox.setObjectName(_fromUtf8(\"uiUseLocalServercheckBox\"))\n", " self.verticalLayout.addWidget(self.uiUseLocalServercheckBox)\n", " self.uiVboxManagePathLabel = QtGui.QLabel(self.uiGeneralSettingsTabWidget)\n", " self.uiVboxManagePathLabel.setObjectName(_fromUtf8(\"uiVboxManagePathLabel\"))\n", " self.verticalLayout.addWidget(self.uiVboxManagePathLabel)\n", " self.horizontalLayout_5 = QtGui.QHBoxLayout()\n", " self.horizontalLayout_5.setObjectName(_fromUtf8(\"horizontalLayout_5\"))\n", " self.uiVboxManagePathLineEdit = QtGui.QLineEdit(self.uiGeneralSettingsTabWidget)\n", " sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)\n", " sizePolicy.setHorizontalStretch(0)\n", " sizePolicy.setVerticalStretch(0)\n", " sizePolicy.setHeightForWidth(self.uiVboxManagePathLineEdit.sizePolicy().hasHeightForWidth())\n", " self.uiVboxManagePathLineEdit.setSizePolicy(sizePolicy)\n", " self.uiVboxManagePathLineEdit.setObjectName(_fromUtf8(\"uiVboxManagePathLineEdit\"))\n", " self.horizontalLayout_5.addWidget(self.uiVboxManagePathLineEdit)\n", " self.uiVboxManagePathToolButton = QtGui.QToolButton(self.uiGeneralSettingsTabWidget)\n", " self.uiVboxManagePathToolButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)\n", " self.uiVboxManagePathToolButton.setObjectName(_fromUtf8(\"uiVboxManagePathToolButton\"))\n", " self.horizontalLayout_5.addWidget(self.uiVboxManagePathToolButton)\n", " self.verticalLayout.addLayout(self.horizontalLayout_5)\n", " self.uiVboxManageUserLabel = QtGui.QLabel(self.uiGeneralSettingsTabWidget)\n", " 
self.uiVboxManageUserLabel.setObjectName(_fromUtf8(\"uiVboxManageUserLabel\"))\n", " self.verticalLayout.addWidget(self.uiVboxManageUserLabel)\n", " self.uiVboxManageUserLineEdit = QtGui.QLineEdit(self.uiGeneralSettingsTabWidget)\n", " sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)\n", " sizePolicy.setHorizontalStretch(0)\n", " sizePolicy.setVerticalStretch(0)\n", " sizePolicy.setHeightForWidth(self.uiVboxManageUserLineEdit.sizePolicy().hasHeightForWidth())\n", " self.uiVboxManageUserLineEdit.setSizePolicy(sizePolicy)\n", " self.uiVboxManageUserLineEdit.setObjectName(_fromUtf8(\"uiVboxManageUserLineEdit\"))\n", " self.verticalLayout.addWidget(self.uiVboxManageUserLineEdit)\n", " spacerItem = QtGui.QSpacerItem(390, 193, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n", " self.verticalLayout.addItem(spacerItem)\n", " self.uiTabWidget.addTab(self.uiGeneralSettingsTabWidget, _fromUtf8(\"\"))\n", " self.verticalLayout_2.addWidget(self.uiTabWidget)\n", " self.horizontalLayout_2 = QtGui.QHBoxLayout()\n", " self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n", " spacerItem1 = QtGui.QSpacerItem(164, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n", " self.horizontalLayout_2.addItem(spacerItem1)\n", " self.uiRestoreDefaultsPushButton = QtGui.QPushButton(VirtualBoxPreferencesPageWidget)\n", " self.uiRestoreDefaultsPushButton.setObjectName(_fromUtf8(\"uiRestoreDefaultsPushButton\"))\n", " self.horizontalLayout_2.addWidget(self.uiRestoreDefaultsPushButton)\n", " self.verticalLayout_2.addLayout(self.horizontalLayout_2)\n", "\n", " self.retranslateUi(VirtualBoxPreferencesPageWidget)\n", " self.uiTabWidget.setCurrentIndex(0)\n", " QtCore.QMetaObject.connectSlotsByName(VirtualBoxPreferencesPageWidget)\n", "\n", " def retranslateUi(self, VirtualBoxPreferencesPageWidget):\n", " VirtualBoxPreferencesPageWidget.setWindowTitle(_translate(\"VirtualBoxPreferencesPageWidget\", \"VirtualBox\", None))\n", " self.uiUseLocalServercheckBox.setText(_translate(\"VirtualBoxPreferencesPageWidget\", \"Use the local server\", None))\n", " self.uiVboxManagePathLabel.setText(_translate(\"VirtualBoxPreferencesPageWidget\", \"Path to VBoxManage:\", None))\n", " self.uiVboxManagePathToolButton.setText(_translate(\"VirtualBoxPreferencesPageWidget\", \"&Browse...\", None))\n", " self.uiVboxManageUserLabel.setText(_translate(\"VirtualBoxPreferencesPageWidget\", \"Run VirtualBox as another user (GNS3 running as root):\", None))\n", " self.uiTabWidget.setTabText(self.uiTabWidget.indexOf(self.uiGeneralSettingsTabWidget), _translate(\"VirtualBoxPreferencesPageWidget\", \"General settings\", None))\n", " self.uiRestoreDefaultsPushButton.setText(_translate(\"VirtualBoxPreferencesPageWidget\", \"Restore defaults\", None))\n", "\n" ]
[ 0, 0, 0.006578947368421052, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.022222222222222223, 0, 0, 0, 0, 0, 0.02, 0, 0.01, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0.010526315789473684, 0.012345679012345678, 0, 0.011235955056179775, 0, 0.01098901098901099, 0, 0.012048192771084338, 0.011764705882352941, 0, 0, 0, 0.011235955056179775, 0.01, 0, 0, 0.009900990099009901, 0, 0.01098901098901099, 0, 0.010752688172043012, 0.011235955056179775, 0.010526315789473684, 0, 0, 0.012048192771084338, 0.011764705882352941, 0, 0.011235955056179775, 0.01, 0, 0, 0.009900990099009901, 0, 0.01098901098901099, 0, 0.009523809523809525, 0, 0, 0, 0, 0, 0.009523809523809525, 0, 0.010638297872340425, 0.010309278350515464, 0, 0, 0, 0, 0, 0, 0, 0, 0.00819672131147541, 0.008130081300813009, 0.008403361344537815, 0.008695652173913044, 0.006493506493506494, 0.005952380952380952, 0.00819672131147541, 1 ]
97
0.014169
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import tables as htables
from horizon import tabs

from openstack_dashboard import api

from neutron_vpnaas_dashboard.api import vpn as api_vpn
from neutron_vpnaas_dashboard.dashboards.project.vpn import tables


class IPSecSiteConnectionsTab(tabs.TableTab, htables.DataTableView):
    table_classes = (tables.IPSecSiteConnectionsTable,)
    name = _("IPSec Site Connections")
    slug = "ipsecsiteconnections"
    template_name = ("horizon/common/_detail_table.html")
    FILTERS_MAPPING = {'admin_state_up': {_("up"): True, _("down"): False}}

    def get_ipsecsiteconnectionstable_data(self):
        try:
            filters = self.get_filters()
            tenant_id = self.request.user.tenant_id
            if 'vpnservice' in filters:
                filters['vpnservice_id'] = \
                    [v.id for v in api_vpn.vpnservice_list(
                        self.tab_group.request, tenant_id=tenant_id,
                        name=filters['vpnservice'])]
                del filters['vpnservice']
            if 'ikepolicy' in filters:
                filters['ikepolicy_id'] = \
                    [i.id for i in api_vpn.ikepolicy_list(
                        self.tab_group.request, tenant_id=tenant_id,
                        name=filters['ikepolicy'])]
                del filters['ikepolicy']
            if 'ipsecpolicy' in filters:
                filters['ipsecpolicy_id'] = \
                    [i.id for i in api_vpn.ipsecpolicy_list(
                        self.tab_group.request, tenant_id=tenant_id,
                        name=filters['ipsecpolicy'])]
                del filters['ipsecpolicy']
            ipsecsiteconnections = api_vpn.ipsecsiteconnection_list(
                self.tab_group.request, tenant_id=tenant_id, **filters)
        except Exception:
            ipsecsiteconnections = []
            exceptions.handle(
                self.tab_group.request,
                _('Unable to retrieve IPSec Site Connections list.'))
        return ipsecsiteconnections

    def get_filters(self):
        self.table = self._tables['ipsecsiteconnectionstable']
        self.handle_server_filter(self.request, table=self.table)
        self.update_server_filter_action(self.request, table=self.table)

        return super(IPSecSiteConnectionsTab,
                     self).get_filters(filters_map=self.FILTERS_MAPPING)


class VPNServicesTab(tabs.TableTab, htables.DataTableView):
    table_classes = (tables.VPNServicesTable,)
    name = _("VPN Services")
    slug = "vpnservices"
    template_name = ("horizon/common/_detail_table.html")

    def get_vpnservicestable_data(self):
        try:
            filters = self.get_filters()
            tenant_id = self.request.user.tenant_id
            if 'subnet_name' in filters:
                subnets = api.neutron.subnet_list(self.tab_group.request,
                                                  tenant_id=tenant_id,
                                                  cidr=filters['subnet_name'])
                subnets_ids = [n.id for n in subnets]
                del filters['subnet_name']
                if not subnets_ids:
                    return []
                filters['subnet_id'] = subnets_ids
            if 'router_name' in filters:
                routers = api.neutron.router_list(self.tab_group.request,
                                                  tenant_id=tenant_id,
                                                  name=filters['router_name'])
                routers_ids = [r.id for r in routers]
                if not routers:
                    return []
                filters['router_id'] = routers_ids
            vpnservices = api_vpn.vpnservice_list(
                self.tab_group.request, tenant_id=tenant_id, **filters)
        except Exception:
            vpnservices = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve VPN Services list.'))
        return vpnservices

    def get_filters(self):
        self.table = self._tables['vpnservicestable']
        self.handle_server_filter(self.request, table=self.table)
        self.update_server_filter_action(self.request, table=self.table)

        return super(VPNServicesTab, self).get_filters()


class IKEPoliciesTab(tabs.TableTab, htables.DataTableView):
    table_classes = (tables.IKEPoliciesTable,)
    name = _("IKE Policies")
    slug = "ikepolicies"
    template_name = ("horizon/common/_detail_table.html")

    def get_ikepoliciestable_data(self):
        try:
            filters = self.get_filters()
            tenant_id = self.request.user.tenant_id
            ikepolicies = api_vpn.ikepolicy_list(
                self.tab_group.request, tenant_id=tenant_id, **filters)
        except Exception:
            ikepolicies = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve IKE Policies list.'))
        return ikepolicies

    def get_filters(self):
        self.table = self._tables['ikepoliciestable']
        self.handle_server_filter(self.request, table=self.table)
        self.update_server_filter_action(self.request, table=self.table)

        return super(IKEPoliciesTab, self).get_filters()


class IPSecPoliciesTab(tabs.TableTab, htables.DataTableView):
    table_classes = (tables.IPSecPoliciesTable,)
    name = _("IPSec Policies")
    slug = "ipsecpolicies"
    template_name = ("horizon/common/_detail_table.html")

    def get_ipsecpoliciestable_data(self):
        try:
            filters = self.get_filters()
            tenant_id = self.request.user.tenant_id
            ipsecpolicies = api_vpn.ipsecpolicy_list(
                self.tab_group.request, tenant_id=tenant_id, **filters)
        except Exception:
            ipsecpolicies = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve IPSec Policies list.'))
        return ipsecpolicies

    def get_filters(self):
        self.table = self._tables['ipsecpoliciestable']
        self.handle_server_filter(self.request, table=self.table)
        self.update_server_filter_action(self.request, table=self.table)

        return super(IPSecPoliciesTab, self).get_filters()


class VPNTabs(tabs.TabGroup):
    slug = "vpntabs"
    tabs = (IKEPoliciesTab, IPSecPoliciesTab,
            VPNServicesTab, IPSecSiteConnectionsTab,)
    sticky = True


class IKEPolicyDetailsTab(tabs.Tab):
    name = _("IKE Policy Details")
    slug = "ikepolicydetails"
    template_name = "project/vpn/_ikepolicy_details.html"

    def get_context_data(self, request):
        ikepolicy = self.tab_group.kwargs['ikepolicy']
        return {'ikepolicy': ikepolicy}


class IKEPolicyDetailsTabs(tabs.TabGroup):
    slug = "ikepolicytabs"
    tabs = (IKEPolicyDetailsTab,)


class IPSecPolicyDetailsTab(tabs.Tab):
    name = _("IPSec Policy Details")
    slug = "ipsecpolicydetails"
    template_name = "project/vpn/_ipsecpolicy_details.html"

    def get_context_data(self, request):
        ipsecpolicy = self.tab_group.kwargs['ipsecpolicy']
        return {'ipsecpolicy': ipsecpolicy}


class IPSecPolicyDetailsTabs(tabs.TabGroup):
    slug = "ipsecpolicytabs"
    tabs = (IPSecPolicyDetailsTab,)


class VPNServiceDetailsTab(tabs.Tab):
    name = _("VPN Service Details")
    slug = "vpnservicedetails"
    template_name = "project/vpn/_vpnservice_details.html"

    def get_context_data(self, request):
        vpnservice = self.tab_group.kwargs['vpnservice']
        return {'vpnservice': vpnservice}


class VPNServiceDetailsTabs(tabs.TabGroup):
    slug = "vpnservicetabs"
    tabs = (VPNServiceDetailsTab,)


class IPSecSiteConnectionDetailsTab(tabs.Tab):
    name = _("IPSec Site Connection Details")
    slug = "ipsecsiteconnectiondetails"
    template_name = "project/vpn/_ipsecsiteconnection_details.html"

    def get_context_data(self, request):
        ipsecsiteconnection = self.tab_group.kwargs['ipsecsiteconnection']
        return {'ipsecsiteconnection': ipsecsiteconnection}


class IPSecSiteConnectionDetailsTabs(tabs.TabGroup):
    slug = "ipsecsiteconnectiontabs"
    tabs = (IPSecSiteConnectionDetailsTab,)
[ "# Copyright 2013, Mirantis Inc\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n", "# not use this file except in compliance with the License. You may obtain\n", "# a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n", "# License for the specific language governing permissions and limitations\n", "# under the License.\n", "\n", "from django.utils.translation import ugettext_lazy as _\n", "\n", "from horizon import exceptions\n", "from horizon import tables as htables\n", "from horizon import tabs\n", "\n", "from openstack_dashboard import api\n", "\n", "from neutron_vpnaas_dashboard.api import vpn as api_vpn\n", "from neutron_vpnaas_dashboard.dashboards.project.vpn import tables\n", "\n", "\n", "class IPSecSiteConnectionsTab(tabs.TableTab, htables.DataTableView):\n", " table_classes = (tables.IPSecSiteConnectionsTable,)\n", " name = _(\"IPSec Site Connections\")\n", " slug = \"ipsecsiteconnections\"\n", " template_name = (\"horizon/common/_detail_table.html\")\n", " FILTERS_MAPPING = {'admin_state_up': {_(\"up\"): True, _(\"down\"): False}}\n", "\n", " def get_ipsecsiteconnectionstable_data(self):\n", " try:\n", " filters = self.get_filters()\n", " tenant_id = self.request.user.tenant_id\n", " if 'vpnservice' in filters:\n", " filters['vpnservice_id'] = \\\n", " [v.id for v in api_vpn.vpnservice_list(\n", " self.tab_group.request, tenant_id=tenant_id,\n", " name=filters['vpnservice'])]\n", " del filters['vpnservice']\n", " if 'ikepolicy' in filters:\n", " filters['ikepolicy_id'] = \\\n", " [i.id for i in api_vpn.ikepolicy_list(\n", " self.tab_group.request, tenant_id=tenant_id,\n", " name=filters['ikepolicy'])]\n", " del filters['ikepolicy']\n", " if 'ipsecpolicy' in filters:\n", " filters['ipsecpolicy_id'] = \\\n", " [i.id for i in api_vpn.ipsecpolicy_list(\n", " self.tab_group.request, tenant_id=tenant_id,\n", " name=filters['ipsecpolicy'])]\n", " del filters['ipsecpolicy']\n", " ipsecsiteconnections = api_vpn.ipsecsiteconnection_list(\n", " self.tab_group.request, tenant_id=tenant_id, **filters)\n", " except Exception:\n", " ipsecsiteconnections = []\n", " exceptions.handle(\n", " self.tab_group.request,\n", " _('Unable to retrieve IPSec Site Connections list.'))\n", " return ipsecsiteconnections\n", "\n", " def get_filters(self):\n", " self.table = self._tables['ipsecsiteconnectionstable']\n", " self.handle_server_filter(self.request, table=self.table)\n", " self.update_server_filter_action(self.request, table=self.table)\n", "\n", " return super(IPSecSiteConnectionsTab,\n", " self).get_filters(filters_map=self.FILTERS_MAPPING)\n", "\n", "\n", "class VPNServicesTab(tabs.TableTab, htables.DataTableView):\n", " table_classes = (tables.VPNServicesTable,)\n", " name = _(\"VPN Services\")\n", " slug = \"vpnservices\"\n", " template_name = (\"horizon/common/_detail_table.html\")\n", "\n", " def get_vpnservicestable_data(self):\n", " try:\n", " filters = self.get_filters()\n", " tenant_id = self.request.user.tenant_id\n", " if 'subnet_name' in filters:\n", " subnets = api.neutron.subnet_list(self.tab_group.request,\n", " tenant_id=tenant_id,\n", " cidr=filters['subnet_name'])\n", " subnets_ids = [n.id for n in subnets]\n", " del filters['subnet_name']\n", " if not 
subnets_ids:\n", " return []\n", " filters['subnet_id'] = subnets_ids\n", " if 'router_name' in filters:\n", " routers = api.neutron.router_list(self.tab_group.request,\n", " tenant_id=tenant_id,\n", " name=filters['router_name'])\n", " routers_ids = [r.id for r in routers]\n", " if not routers:\n", " return []\n", " filters['router_id'] = routers_ids\n", " vpnservices = api_vpn.vpnservice_list(\n", " self.tab_group.request, tenant_id=tenant_id, **filters)\n", " except Exception:\n", " vpnservices = []\n", " exceptions.handle(self.tab_group.request,\n", " _('Unable to retrieve VPN Services list.'))\n", " return vpnservices\n", "\n", " def get_filters(self):\n", " self.table = self._tables['vpnservicestable']\n", " self.handle_server_filter(self.request, table=self.table)\n", " self.update_server_filter_action(self.request, table=self.table)\n", "\n", " return super(VPNServicesTab, self).get_filters()\n", "\n", "\n", "class IKEPoliciesTab(tabs.TableTab, htables.DataTableView):\n", " table_classes = (tables.IKEPoliciesTable,)\n", " name = _(\"IKE Policies\")\n", " slug = \"ikepolicies\"\n", " template_name = (\"horizon/common/_detail_table.html\")\n", "\n", " def get_ikepoliciestable_data(self):\n", " try:\n", " filters = self.get_filters()\n", " tenant_id = self.request.user.tenant_id\n", " ikepolicies = api_vpn.ikepolicy_list(\n", " self.tab_group.request, tenant_id=tenant_id, **filters)\n", " except Exception:\n", " ikepolicies = []\n", " exceptions.handle(self.tab_group.request,\n", " _('Unable to retrieve IKE Policies list.'))\n", " return ikepolicies\n", "\n", " def get_filters(self):\n", " self.table = self._tables['ikepoliciestable']\n", " self.handle_server_filter(self.request, table=self.table)\n", " self.update_server_filter_action(self.request, table=self.table)\n", "\n", " return super(IKEPoliciesTab, self).get_filters()\n", "\n", "\n", "class IPSecPoliciesTab(tabs.TableTab, htables.DataTableView):\n", " table_classes = (tables.IPSecPoliciesTable,)\n", " name = _(\"IPSec Policies\")\n", " slug = \"ipsecpolicies\"\n", " template_name = (\"horizon/common/_detail_table.html\")\n", "\n", " def get_ipsecpoliciestable_data(self):\n", " try:\n", " filters = self.get_filters()\n", " tenant_id = self.request.user.tenant_id\n", " ipsecpolicies = api_vpn.ipsecpolicy_list(\n", " self.tab_group.request, tenant_id=tenant_id, **filters)\n", " except Exception:\n", " ipsecpolicies = []\n", " exceptions.handle(self.tab_group.request,\n", " _('Unable to retrieve IPSec Policies list.'))\n", " return ipsecpolicies\n", "\n", " def get_filters(self):\n", " self.table = self._tables['ipsecpoliciestable']\n", " self.handle_server_filter(self.request, table=self.table)\n", " self.update_server_filter_action(self.request, table=self.table)\n", "\n", " return super(IPSecPoliciesTab, self).get_filters()\n", "\n", "\n", "class VPNTabs(tabs.TabGroup):\n", " slug = \"vpntabs\"\n", " tabs = (IKEPoliciesTab, IPSecPoliciesTab,\n", " VPNServicesTab, IPSecSiteConnectionsTab,)\n", " sticky = True\n", "\n", "\n", "class IKEPolicyDetailsTab(tabs.Tab):\n", " name = _(\"IKE Policy Details\")\n", " slug = \"ikepolicydetails\"\n", " template_name = \"project/vpn/_ikepolicy_details.html\"\n", "\n", " def get_context_data(self, request):\n", " ikepolicy = self.tab_group.kwargs['ikepolicy']\n", " return {'ikepolicy': ikepolicy}\n", "\n", "\n", "class IKEPolicyDetailsTabs(tabs.TabGroup):\n", " slug = \"ikepolicytabs\"\n", " tabs = (IKEPolicyDetailsTab,)\n", "\n", "\n", "class IPSecPolicyDetailsTab(tabs.Tab):\n", " name = 
_(\"IPSec Policy Details\")\n", " slug = \"ipsecpolicydetails\"\n", " template_name = \"project/vpn/_ipsecpolicy_details.html\"\n", "\n", " def get_context_data(self, request):\n", " ipsecpolicy = self.tab_group.kwargs['ipsecpolicy']\n", " return {'ipsecpolicy': ipsecpolicy}\n", "\n", "\n", "class IPSecPolicyDetailsTabs(tabs.TabGroup):\n", " slug = \"ipsecpolicytabs\"\n", " tabs = (IPSecPolicyDetailsTab,)\n", "\n", "\n", "class VPNServiceDetailsTab(tabs.Tab):\n", " name = _(\"VPN Service Details\")\n", " slug = \"vpnservicedetails\"\n", " template_name = \"project/vpn/_vpnservice_details.html\"\n", "\n", " def get_context_data(self, request):\n", " vpnservice = self.tab_group.kwargs['vpnservice']\n", " return {'vpnservice': vpnservice}\n", "\n", "\n", "class VPNServiceDetailsTabs(tabs.TabGroup):\n", " slug = \"vpnservicetabs\"\n", " tabs = (VPNServiceDetailsTab,)\n", "\n", "\n", "class IPSecSiteConnectionDetailsTab(tabs.Tab):\n", " name = _(\"IPSec Site Connection Details\")\n", " slug = \"ipsecsiteconnectiondetails\"\n", " template_name = \"project/vpn/_ipsecsiteconnection_details.html\"\n", "\n", " def get_context_data(self, request):\n", " ipsecsiteconnection = self.tab_group.kwargs['ipsecsiteconnection']\n", " return {'ipsecsiteconnection': ipsecsiteconnection}\n", "\n", "\n", "class IPSecSiteConnectionDetailsTabs(tabs.TabGroup):\n", " slug = \"ipsecsiteconnectiontabs\"\n", " tabs = (IPSecSiteConnectionDetailsTab,)\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
233
0
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### bl_info = { "name": "UE4 Tools", "author": "LluisGarcia3D", "version": (1, 2, 4), "blender": (2, 7, 5), "location": "View3D > Tools > UE4 Tools", "description": "Adds some tools for improve the blender to unreal engine workflow", "warning": "", "wiki_url": "http://www.lluisgarcia.es/ue-tools-addon/", "category": "UE4 Tools"} import bpy from bpy.types import Scene from bpy.props import * from bpy.props import FloatVectorProperty from bpy.types import Operator, AddonPreferences, Panel, Menu import os from os import mkdir, listdir from os.path import dirname, exists, join from mathutils import Matrix from bpy.props import BoolProperty from bpy.props import * from bpy.app.handlers import persistent from math import radians @persistent def load_handler(dummy): #print("Load Handler:", bpy.data.filepath) Main_UI_Properties(bpy.context.scene) SetObjScale(bpy.context.scene) Rename_Properties(bpy.context.scene) FBX_Export_Properties(bpy.context.scene) Animation_UI_Properties(bpy.context.scene) bpy.app.handlers.load_post.append(load_handler) #------------------------------------------- #----------------VARIABLES------------------ #------------------------------------------- #UI Display Var Init = False UE_SceneTools = False UE_RenameTools = False UE_ExportTools = False UE_AnimationTools = False #Scene Settings Var Init = False ObjScale = False testvar = 0 #Batch Rename Vars ObjBaseNameSelector = "Custom" RenameObjects = True RenameData = True RenameMaterials = False PrefixSelector = False PrefixObj = True PrefixData = True PrefixMat = False CustomRename = "" Prefix="" name="" Collision= False #Animation Vars CustomShapesList = [] UE_IK_HeadInherit = False UE_IK_WaistInherit = False UE_IK_ONOFF = True UE_IK_ArmsInherit = False UE_IK_DeformBones = False UE_IK_helperBones = True UE_IK_Lock_R_hand = True UE_IK_Lock_L_hand = True UE_IK_Lock_R_foot = True UE_IK_Lock_L_foot = True UE_IK_Leg = True UE_IK_Arm = True UE_IK_Showbones = False UE_IK_ShowDeformbones = False UE_ShowAdvancedRigProp = False UE_ShowRigProps=True UE_ShowRigExport=False RIG_name = "HeroTPP" Include_hero = True HeroLow = False Rotate_character=False Rotate_Armature = False UE_Custom_RIG_Name = "" ACT_name = "New action" FakeActionName="" Steep1=False #FBX Export Vars FBX_name_multi = "" FBXBaseNameSelector = "Object" FBX_ExportCustomPath = "" FBXRelativeFolderSelector = True FBX_CustomExportName="" FBX_AssetType = "STATIC" FBX_Format = "BIN7400" FBX_ExportCollision= False FBX_Global_scale = 1 FBX_Tangent=True FBX_Bake_Anim = False FBX_NLA = False FBX_AllActions = False FBX_AnimStep = 1 FBX_AnimSimplify = 1 FBX_UseAnim = False FBX_AllActions61 = False FBX_AxisForward='-Z' FBX_AxisUp = 'Y' FBX_ShowAxis = False FBX_PivotToCenter = False FBX_Smoothing = 0 FBX_smooth_Type= 
"OFF" FBXSmoothingType = 'OFF' #------------------------------------------- #---------------UI Callbacks---------------- #------------------------------------------- #scene tools Active? def UE_Scene_Tools_Callback (scene,context): global UE_SceneTools UE_SceneTools = scene.UESceneTools #Rename tools Active? def UE_Rename_Tools_Callback (scene,context): global UE_RenameTools UE_RenameTools = scene.UERenameTools #Export tools Active? def UE_Export_Tools_Callback (scene,context): global UE_ExportTools UE_ExportTools = scene.UEExportTools #Animation tools Active? def UE_Animation_Tools_Callback (scene,context): global UE_AnimationTools UE_AnimationTools = scene.UEAnimationTools #------------------------------------------- #-----------------UI PROPS------------------ #------------------------------------------- def Main_UI_Properties(scn): #Scene Tools bpy.types.Scene.UESceneTools = BoolProperty( name = "Rename Data", default=False, update = UE_Scene_Tools_Callback, description = "Activate the Scene tools") scn['UESceneTools'] = False #Rename Tools bpy.types.Scene.UERenameTools = BoolProperty( name = "Rename Data", default=False, update = UE_Rename_Tools_Callback, description = "Activate Rename tools") scn['UERenameTools'] = False #Export Tools bpy.types.Scene.UEExportTools = BoolProperty( name = "Rename Data", default=False, update = UE_Export_Tools_Callback, description = "Activate Export tools") scn['UEExportTools'] = False #Animation Tools bpy.types.Scene.UEAnimationTools = BoolProperty( name = "Rename Data", default=False, update = UE_Animation_Tools_Callback, description = "Activate Animation tools") scn['UEAnimationTools'] = False #------------------------------------------- #--------STORE SCENE SETTINGS PROPS--------- #------------------------------------------- #--------------------- #Props Callbacks ----- #--------------------- def ObjScale_Callback (scene,context): global ObjScale ObjScale = scene.UEObjScale print (ObjScale) #--------------------- #Props --------------- #--------------------- def SetObjScale(scn): bpy.types.Scene.UEObjScale = BoolProperty( name = "Scale Selected Objects",update = ObjScale_Callback, description = "True or False?") scn['UEObjScale'] = ObjScale return #------------------------------------------- #---------SCENE SETTINGS FUNCTIONS---------- #------------------------------------------- #------------------------------------------- #---------BATCH RENAME CALLBACKS----------- #------------------------------------------- #base name callback def Obj_Base_Name_Selector_Callback (scene,context): global ObjBaseNameSelector ObjBaseNameSelector = scene.naming_base print ("Base name = " + str(ObjBaseNameSelector)) #Rename object selector callback def Rename_Objects_Callback (scene,context): global RenameObjects RenameObjects = scene.rename_object print ("Rename Objects = " + str(RenameObjects)) #Rename Data selector callback def Rename_Data_Callback (scene,context): global RenameData RenameData = scene.rename_data print ("Rename Data = " + str(RenameData)) #Rename Materials selector callback def Rename_materials_Callback (scene,context): global RenameMaterials RenameMaterials = scene.rename_material print ("Rename Materials = " + str(RenameMaterials)) #Add Prefix selector callback def Prefix_selector_Callback (scene,context): global PrefixSelector PrefixSelector = scene.rename_use_prefix print ("Add Prefix = " + str(PrefixSelector)) #Add Prefix to objects callback def Prefix_objects_Callback (scene,context): global PrefixObj PrefixObj = scene.prefix_object print 
("Add Prefix to objects = " + str(PrefixObj)) #Add Prefix to Data callback def Prefix_data_Callback (scene,context): global PrefixData PrefixData = scene.prefix_data print ("Add Prefix to data = " + str(PrefixData)) #Add Prefix to Materials callback def Prefix_materials_Callback (scene,context): global PrefixMat PrefixMat = scene.prefix_material print ("Add Prefix to materials = " + str(PrefixMat)) #Is Collision callback def Collision_Callback (scene,context): global Collision Collision = scene.IsCollision print ("is a collisoin object = " + str(Collision)) #------------------------------------------- #---------BATCH RENAME PROPERTIES----------- #------------------------------------------- def Rename_Properties(scn): #name origins name_origins = [ ('Custom', 'Custom', 'Custom'), ('Object', 'Object', 'Object'), ('Mesh', 'Mesh', 'Mesh'), ('Material', 'Material', 'Material') ] #naming base bpy.types.Scene.naming_base = EnumProperty( items = name_origins, name = "Name Used:", default = "Custom", update = Obj_Base_Name_Selector_Callback, description='Base name for rename') scn['naming_base'] = 0 #custom name bpy.types.Scene.rename_custom = StringProperty( name = "Custom Name", default='New Name', description='Rename all with this String') scn['rename_custom'] = "" #Data to rename #objects? bpy.types.Scene.rename_object = BoolProperty( name = "Rename Objects", default=True, update = Rename_Objects_Callback, description = "Rename Objects") scn['rename_object'] = True #objects data? bpy.types.Scene.rename_data = BoolProperty( name = "Rename Data", default=True, update = Rename_Data_Callback, description = "Rename Object\'s Data") scn['rename_data'] = True #materials? bpy.types.Scene.rename_material = BoolProperty( name = "Rename Materials", default=False, update = Rename_materials_Callback, description = "Rename Objects\' Materials") scn['rename_material'] = False #Prefix selector bpy.types.Scene.rename_use_prefix = BoolProperty( name = "Add Prefix", default=False, update = Prefix_selector_Callback, description = "Prefix Object,data or material names") scn['rename_use_prefix'] = False #Custom Prefix bpy.types.Scene.rename_prefix = StringProperty( name = "Prefix", default='', description='Prefix name with this string') scn['rename_prefix'] = "" #Prefix on Objects? bpy.types.Scene.prefix_object = BoolProperty( name = "Object", default=True, update = Prefix_objects_Callback, description = "Prefix Objects names") scn['prefix_object'] = True #Prefix on Data? bpy.types.Scene.prefix_data = BoolProperty( name = "Data", default=True, update = Prefix_data_Callback, description = "Prefix Data Names") scn['prefix_data'] = True #Prefix on Materials? bpy.types.Scene.prefix_material = BoolProperty( name = "Material", default=False, update = Prefix_materials_Callback, description = "Prefix Material Names") scn['prefix_material'] = False #Is collision? 
bpy.types.Scene.IsCollision = BoolProperty( name = "Is Collision", default=False, update = Collision_Callback, description = "If Checked, add the collision prefix") scn['IsCollision'] = False #------------------------------------------- #---------BATCH RENAME FUNCTIONS------------ #------------------------------------------- #Get custom rename def Get_custom_rename(label, key, scn): global CustomRename CustomRename = scn["rename_custom"] #Get custom Prefix def Get_custom_prefix(label, key, scn): global Prefix Prefix = scn["rename_prefix"] #Get firts Material def get_first_material_name(ob): for m_slot in ob.material_slots: if m_slot.material: material_name = m_slot.material.name return material_name #main Function for rename def Rename_detablocks(self, context): obs = bpy.context.selected_objects sufix = 0 global PrefixSelector global Prefix scn = context.scene Get_custom_prefix("String: ", 'rename_prefix', scn) for ob in obs: #Get Base Names if ObjBaseNameSelector == 'Object': name = ob.name if ObjBaseNameSelector == 'Mesh': if ob.data: name = ob.data.name else: name = ob.name if ObjBaseNameSelector == 'Material': material_name = get_first_material_name(ob) if not material_name: name = ob.name else: name = material_name if ObjBaseNameSelector == 'Custom': name = CustomRename if Collision == True: Prefix = "UCX" PrefixSelector = True if PrefixSelector == True: print (Prefix) #Rename objects Names if RenameObjects == True: if (PrefixSelector == True and PrefixObj == True): ob.name = Prefix +"_"+ name print (ob.name) else: ob.name = name if (PrefixSelector == True and PrefixObj == True and Collision == True): ob.name = Prefix +"_"+ name +"_"+ str(sufix) #else: #ob.name = name #Rename objects data Names if RenameData == True: if (ob.data and ob.data.users == 1): if (PrefixSelector == True and PrefixData == True): ob.data.name = Prefix +"_"+ name else: ob.data.name = name if (PrefixSelector == True and PrefixData == True and Collision == True): ob.data.name = Prefix +"_"+ name +"_"+ str(sufix) #else: #ob.data.name = name #Rename material Names if RenameMaterials == True: if ob.material_slots: for m_slot in ob.material_slots: if m_slot.material: if m_slot.material.users == 1: if (PrefixSelector == True and PrefixMat == True): m_slot.material.name = Prefix +"_"+ name #else: #m_slot.material.name = name sufix = sufix + 1 #------------------------------------------- #------------FBX EXPORT CALLBACKS----------- #------------------------------------------- def FBX_Show_Axis_Callback (scene,context): global FBX_ShowAxis FBX_ShowAxis = scene.FBX_Show_Axis print ("Base name = " + str(FBX_ShowAxis)) def FBX_Axis_Forward_Callback (scene,context): global FBX_AxisForward FBX_AxisForward = scene.FBX_Axis_Forward print(FBX_AxisForward) def FBX_Axis_Up_Callback (scene,context): global FBX_AxisUp FBX_AxisUp = scene.FBX_Axis_Up print(FBX_AxisUp) def FBX_Smoothing_Selector_Callback (scene,context): global FBXSmoothingType FBXSmoothingType = scene.FBX_Smoothing print (str(FBXSmoothingType)) def FBX_Base_Name_Selector_Callback (scene,context): global FBXBaseNameSelector FBXBaseNameSelector = scene.FBX_base_name print ("Base name = " + str(FBXBaseNameSelector)) def FBX_Relative_Assets_Folder_Callback (scene,context): global FBXRelativeFolderSelector FBXRelativeFolderSelector = scene.FBX_Relative_Assets_Folder print ("Base name = " + str(FBXRelativeFolderSelector)) def FBX_Export_Collision_Callback (scene,context): global FBX_ExportCollision FBX_ExportCollision = scene.FBX_Export_Collision_Obj print ("Base name = " 
+ str(FBX_ExportCollision)) def FBX_TangentSpace_Callback (scene,context): global FBX_Tangent FBX_Tangent = scene.FBX_TangentSpace print ("Base name = " + str(FBX_Tangent)) def FBX_BakeAnim_Callback (scene,context): global FBX_Bake_Anim FBX_Bake_Anim = scene.FBX_BakeAnim print ("Base name = " + str(FBX_Bake_Anim)) def FBX_NLA_Callback (scene,context): global FBX_NLA FBX_NLA = scene.FBX_Use_NLA print ("Base name = " + str(FBX_NLA)) def FBX_All_Actions_Callback (scene,context): global FBX_AllActions FBX_AllActions = scene.FBX_All_Actions print ("Base name = " + str(FBX_AllActions)) def FBX_Anim_Steep_Callback (scene,context): global FBX_AnimStep FBX_AnimStep = scene.FBX_Anim_Step print ("Base name = " + str(FBX_AnimStep)) def FBX_Anim_Simplify_Callback (scene,context): global FBX_AnimSimplify FBX_AnimSimplify = scene.FBX_Anim_Simplify print ("Base name = " + str(FBX_AnimSimplify)) def FBX_Use_Anim_Callback (scene,context): global FBX_UseAnim FBX_UseAnim = scene.FBX_Use_Anim print ("Base name = " + str(FBX_UseAnim)) def FBX_All_Actions_61_Callback (scene,context): global FBX_AllActions61 FBX_AllActions61 = scene.FBX_All_Actions_61 print ("Base name = " + str(FBX_AllActions61)) def FBX_Pivot_To_Center_Callback (scene,context): global FBX_PivotToCenter FBX_PivotToCenter = scene.FBX_Pivot_to_Center print ("Base name = " + str(FBX_PivotToCenter)) #------------------------------------------- #-----------FBX EXPORT PROPERTIES----------- #------------------------------------------- def FBX_Export_Properties(scn): #Use Smoothing faces? #Smoothing items FBX_smooth_Type = [ ('OFF', 'OFF', 'OFF'), ('FACE', 'FACE', 'FACE'), ('EDGE', 'EDGE' ,'EDGE') ] #Smoothing bpy.types.Scene.FBX_Smoothing = EnumProperty( items = FBX_smooth_Type, name = "FBX Smooth type used:", default = 'OFF', update = FBX_Smoothing_Selector_Callback, description='Smoothing type for the objects') scn['FBX_Smoothing'] = 0 #Pivot To center bpy.types.Scene.FBX_Pivot_to_Center = BoolProperty( name = "Pivot To Center", default=False, update = FBX_Pivot_To_Center_Callback, description = "Allow to Export objects with the correct pivot point without have the object at the center of the scene") scn['FBX_Pivot_to_Center'] = False #name origins FBX_name_origins = [ ('Object', 'Object', 'Object'), ('Custom', 'Custom', 'Custom') ] #naming base bpy.types.Scene.FBX_base_name = EnumProperty( items = FBX_name_origins, name = "FBX Export Name Used:", default = "Object", update = FBX_Base_Name_Selector_Callback, description='Base name for Export as FBX') scn['FBX_base_name'] = 0 #Show Axis? 
bpy.types.Scene.FBX_Show_Axis = BoolProperty( name = "Show Axis", default=False, update = FBX_Show_Axis_Callback, description = "Check for show Axis Orientation") scn['FBX_Show_Axis'] = False #Axis_Forward inputs FBX_Axis_ForwardList = [ ('X', "X Forward", ""), ('Y', "Y Forward", ""), ('Z', "Z Forward", ""), ('-X', "-X Forward", ""), ('-Y', "-Y Forward", ""), ('-Z', "-Z Forward", "") ] #Axis Fordware bpy.types.Scene.FBX_Axis_Forward = EnumProperty( items = FBX_Axis_ForwardList, name = "Forward", default = '-Z', update = FBX_Axis_Forward_Callback, description='Set the Forward Axis') scn['FBX_Axis_Forward'] = 5 #Axis_Up inputs FBX_Axis_UpList = [ ('X', "X Up", ""), ('Y', "Y Up", ""), ('Z', "Z Up", ""), ('-X', "-X Up", ""), ('-Y', "-Y Up", ""), ('-Z', "-Z Up", "") ] #Axis Up bpy.types.Scene.FBX_Axis_Up = EnumProperty( items = FBX_Axis_UpList, name = "Up", default = 'Y', update = FBX_Axis_Up_Callback, description='Set the Up Axis') scn['FBX_Axis_Up'] = 1 #custom name bpy.types.Scene.FBX_Export_Custom_Name = StringProperty( name = "FBX Custom Name", default='', #New Name description='Export Objects with a custom name') scn['FBX_Export_Custom_Name'] = "" #Export To relative path: UE ASsets bpy.types.Scene.FBX_Relative_Assets_Folder = BoolProperty( name = "UE Assets Folder", default=True, update = FBX_Relative_Assets_Folder_Callback, description = "Export into relative folder called: UE Assets") scn['FBX_Relative_Assets_Folder'] = True #custom Path bpy.types.Scene.FBX_Custom_Export_Path = StringProperty( name = "FBX Custom Folder", default='', #Custom Export Folder description='Export Objects To a custom Path', subtype = 'DIR_PATH') scn['FBX_Custom_Export_Path'] = "" #Export Collision Objects too bpy.types.Scene.FBX_Export_Collision_Obj = BoolProperty( name = "Export Collision Objects", default=False, update = FBX_Export_Collision_Callback, description = "Export Collision Objects along selected objects") scn['FBX_Export_Collision_Obj'] = False #Use Tangent Space ? bpy.types.Scene.FBX_TangentSpace = BoolProperty( name = "Export Collision Objects", default=True, update = FBX_TangentSpace_Callback, description = "Add binormal and tangent vectors, together with normal they form the tangent space (will only work correctly with tris/quads only meshes!") scn['FBX_TangentSpace'] = True #Use Bake anim ? bpy.types.Scene.FBX_BakeAnim = BoolProperty( name = "Use Bake", default=False, update = FBX_BakeAnim_Callback, description = "Export baked keyframe animation") scn['FBX_BakeAnim'] = False #Use NLA ? bpy.types.Scene.FBX_Use_NLA = BoolProperty( name = "Use NLA", default=False, update = FBX_NLA_Callback, description = "Export each non-muted NLA strip as a separated FBX’s AnimStack, if any, instead of global scene animation") scn['FBX_Use_NLA'] = False #Use All Actions ? 
    bpy.types.Scene.FBX_All_Actions = BoolProperty(
        name = "Use All actions",
        default=True,
        update = FBX_All_Actions_Callback,
        description = "Export each action as a separated FBX’s AnimStack, instead of global scene animation")
    scn['FBX_All_Actions'] = True

    #Sampling Rate (anim_step)
    bpy.types.Scene.FBX_Anim_Step = FloatProperty(
        name = "Sampling Rate",
        default=1,
        min=0.01,
        max=100,
        update= FBX_Anim_Steep_Callback)
    scn['FBX_Anim_Step'] = 1

    #Anim Simplify
    bpy.types.Scene.FBX_Anim_Simplify = FloatProperty(
        name = "Simplify",
        default=1,
        min=0,
        max=10,
        update= FBX_Anim_Simplify_Callback)
    scn['FBX_Anim_Simplify'] = 1


#-------------------------------------------
#-----------FBX EXPORT FUNCTIONS------------
#-------------------------------------------

# Grab values From Custom Path
def Get_Custom_Path(label, key, scn):
    global FBX_ExportCustomPath
    FBX_ExportCustomPath = scn["FBX_Custom_Export_Path"]

def Get_Custom_ExportName(label, key, scn):
    global FBX_CustomExportName
    FBX_CustomExportName = scn["FBX_Export_Custom_Name"]

def FBX_SelectCollsionObjects (self,context):
    name = bpy.context.object.name
    obj = bpy.data.objects[name]
    activeLayer = bpy.context.scene.active_layer

    # Make visible all layers
    scn = bpy.context.scene
    for n in range(0,20):
        scn.layers[n] = True

    if FBX_PivotToCenter == False:
        obs = bpy.context.selected_objects
        for ob in obs:
            name = ob.name
            bpy.ops.object.select_pattern(extend=True, pattern="UCX_"+name+"_"+"*", case_sensitive=True)

    if FBX_PivotToCenter == True:
        #Extend the selection with All Collision Objects
        bpy.ops.object.select_pattern(extend=True, pattern="UCX_"+name+"_"+"*", case_sensitive=True)

def FBX_Make_Only_selectedObjLayer_visible (self,context):
    # Make visible only the active object layer
    name = bpy.context.object.name
    obj = bpy.data.objects[name]
    activeLayer = bpy.context.scene.active_layer
    objectLayer = [i for i in range(len(obj.layers)) if obj.layers[i] == True]

    for i in range(len(bpy.context.scene.layers)):
        if i not in objectLayer:
            bpy.context.scene.layers[i] = False
        else:
            bpy.context.scene.layers[i] = True

    if activeLayer not in objectLayer:
        bpy.context.scene.layers[activeLayer] = False

def FBX_Export(self,context):
    global FBX_ExportCustomPath
    global FBX_name_multi

    scn = context.scene
    Get_Custom_Path("String: ", 'FBX_Custom_Export_Path', scn)
    Get_Custom_ExportName("String: ", 'FBX_Custom_Export_Path', scn)

    #Get Name
    if FBX_PivotToCenter == True:
        FBX_name = FBX_name_multi
    if FBX_PivotToCenter == False:
        if FBXBaseNameSelector == "Object":
            FBX_name = bpy.context.object.name
            #print(FBX_name)
        if FBXBaseNameSelector == "Custom":
            FBX_name = FBX_CustomExportName
            #print(FBX_name)

    #Paths
    #FBX_ExportRelativePath = bpy.path.relpath("//UE Assets")
    FBX_ExportRelativePath = bpy.path.abspath("//UE4 Assets")
    FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath)

    if FBXRelativeFolderSelector == True:
        Folder = FBX_ExportRelativePath
        if not exists(FBX_ExportRelativePath):
            mkdir(FBX_ExportRelativePath)
    if FBXRelativeFolderSelector == False:
        Folder = FBX_ExportCustom

    #Profiles:
    if FBX_PivotToCenter == False and FBX_ExportCollision == True:
        FBX_SelectCollsionObjects (self,context)
    else:
        print("no collision exported")

    #Export FBX
    bpy.ops.export_scene.fbx(check_existing=True,
        filepath= Folder + '/'+ FBX_name +'.fbx',
        filter_glob="*.fbx",
        version='BIN7400',
        use_selection=True,
        apply_unit_scale=True,
        axis_forward=FBX_AxisForward,
        axis_up=FBX_AxisUp,
        bake_space_transform=True,
        object_types= {'MESH'},
        use_mesh_modifiers=True,
        mesh_smooth_type=FBXSmoothingType,
        use_mesh_edges=False,
        use_tspace=True,
        use_custom_props=True,
        path_mode='AUTO',
        embed_textures=False,
        batch_mode='OFF',
        use_batch_own_dir=False,
        use_metadata=True)

    if FBX_PivotToCenter == False and FBX_ExportCollision == True:
        bpy.ops.object.select_all(action='DESELECT')
        FBX_Make_Only_selectedObjLayer_visible (self,context)
    else:
        print("no collision exported")

    FBX_ExportCustomPath = ""
    print ("Export OK")


#-------------------------------------------
#-----------------RIG FUNCTIONS-----------------
#-------------------------------------------

#Rig Properties
def Animation_UI_Properties(scn):

    #Show Rig Options
    bpy.types.Scene.UE_Show_Rig_Props= BoolProperty(
        name = "Show Rig Options",
        default=True,
        update = UE_Show_Rig_Props_Callback,
        description = "Show The options for the RIG")
    scn['UE_Show_Rig_Props'] = True

    #Show Rig Export Options
    bpy.types.Scene.UE_Show_Export_options= BoolProperty(
        name = "Show Export Options",
        default=False,
        update = UE_Show_Export_option_Callback,
        description = "Show Export Options for customize the fbx name,folder and scale")
    scn['UE_Show_Export_options'] = False

#Rig Callbacks UE_ShowAdvanced_Rig_Prop_Callback
def RIG_Name_Callback (scene,context):
    global UE_Custom_RIG_Name
    UE_Custom_RIG_Name= scene.Custom_RIG_name

def UE_Show_Rig_Props_Callback (scene,context):
    global UE_ShowRigProps
    UE_ShowRigProps= scene.UE_Show_Rig_Props

def UE_Show_Export_option_Callback (scene,context):
    global UE_ShowRigExport
    UE_ShowRigExport= scene.UE_Show_Export_options

def ACT_Name_Callback (scene,context):
    global UE_Custom_ACT_Name
    UE_Custom_ACT_Name= scene.Custom_ACT_name


#-------------------------------------------
#-----------------RIG EXPORT-------------------
#-------------------------------------------

def FBX_Export_Character(self,context):

    scn = context.scene
    Get_Custom_Path("String: ", 'FBX_Custom_Export_Path', scn)
    Get_Custom_ExportName("String: ", 'FBX_Custom_Export_Path', scn)

    #Get Name
    if FBXBaseNameSelector == "Object":
        FBX_name = bpy.context.object.name
    if FBXBaseNameSelector == "Custom":
        FBX_name = FBX_CustomExportName

    objName=bpy.context.scene.objects.active.name

    #Paths
    #FBX_ExportRelativePath = bpy.path.relpath("//UE Assets")
    FBX_ExportRelativePath = bpy.path.abspath("//UE4 Assets/")
    FBX_Character_Path = FBX_ExportRelativePath + objName+"_Character"
    FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath)

    if FBXRelativeFolderSelector == True:
        Folder = FBX_Character_Path
        if not exists(FBX_ExportRelativePath):
            mkdir(FBX_ExportRelativePath)
        if not exists(FBX_Character_Path):
            mkdir(FBX_Character_Path)
    if FBXRelativeFolderSelector == False:
        Folder = FBX_ExportCustom

    #Export FBX
    bpy.ops.export_scene.fbx(check_existing=True,
        filepath= Folder + '/'+ FBX_name +'.fbx',
        filter_glob="*.fbx",
        version='BIN7400',
        use_selection=True,
        axis_forward=FBX_AxisForward,
        axis_up=FBX_AxisUp,
        bake_space_transform=False,
        apply_unit_scale=True,
        object_types={'ARMATURE', 'MESH'},
        use_mesh_modifiers=True,
        mesh_smooth_type=FBXSmoothingType,
        use_mesh_edges=False,
        use_tspace=True,
        use_custom_props=False,
        add_leaf_bones=False,
        primary_bone_axis='Y',
        secondary_bone_axis='X',
        use_armature_deform_only=True,
        path_mode='AUTO',
        embed_textures=False,
        batch_mode='OFF',
        use_batch_own_dir=False,
        use_metadata=True)

    print ("Export OK")

def FBX_Export_BakedAnimation(self,context):

    scn = context.scene
    Get_Custom_Path("String: ", 'FBX_Custom_Export_Path', scn)
    Get_Custom_ExportName("String: ", 'FBX_Custom_Export_Path', scn)

    #Get Name
    ActionName=bpy.context.active_object.animation_data.action.name
    objName=bpy.context.scene.objects.active.name

    if FBXBaseNameSelector == "Object":
        FBX_name = bpy.context.object.name + "_" +ActionName
    if FBXBaseNameSelector == "Custom":
        FBX_name = FBX_CustomExportName+ "_" +ActionName

    #Paths
    #FBX_ExportRelativePath = bpy.path.relpath("//UE Assets")
    FBX_ExportRelativePath = bpy.path.abspath("//UE4 Assets/")
    FBX_Character_Path = FBX_ExportRelativePath + objName+"_Character"
    FBX_Animation_Path = FBX_Character_Path+"/Animations"
    FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath)

    if FBXRelativeFolderSelector == True:
        Folder = FBX_Animation_Path
        if not exists(FBX_ExportRelativePath):
            mkdir(FBX_ExportRelativePath)
        if not exists(FBX_Character_Path):
            mkdir(FBX_Character_Path)
        if not exists(FBX_Animation_Path):
            mkdir(FBX_Animation_Path)
    if FBXRelativeFolderSelector == False:
        Folder = FBX_ExportCustom

    #Export FBX
    bpy.ops.export_scene.fbx(check_existing=True,
        filepath= Folder + '/'+ FBX_name +'.fbx',
        filter_glob="*.fbx",
        version='BIN7400',
        use_selection=True,
        apply_unit_scale=True,
        axis_forward=FBX_AxisForward,
        axis_up=FBX_AxisUp,
        bake_space_transform=False,
        object_types={'ARMATURE'},
        add_leaf_bones=False,
        primary_bone_axis='Y',
        secondary_bone_axis='X',
        use_armature_deform_only=True,
        bake_anim=True,
        bake_anim_use_all_bones =True,
        bake_anim_use_nla_strips=False,
        bake_anim_use_all_actions=False,
        bake_anim_step=FBX_AnimStep,
        bake_anim_simplify_factor=FBX_AnimSimplify,
        use_anim=True,
        use_anim_action_all=False,
        use_default_take=False,
        use_anim_optimize=False,
        anim_optimize_precision=6.0,
        path_mode='AUTO',
        embed_textures=False,
        batch_mode='OFF',
        use_batch_own_dir=False,
        use_metadata=True)

    print ("Export OK")

def FBX_Export_CameraAnimation(self,context):

    scn = context.scene
    Get_Custom_Path("String: ", 'FBX_Custom_Export_Path', scn)
    Get_Custom_ExportName("String: ", 'FBX_Custom_Export_Path', scn)

    #Get Name
    #ActionName=bpy.context.active_object.animation_data.action.name
    objName=bpy.context.scene.objects.active.name

    if FBXBaseNameSelector == "Object":
        FBX_name = bpy.context.object.name #+ "_" +ActionName
    if FBXBaseNameSelector == "Custom":
        FBX_name = FBX_CustomExportName #+ "_" +ActionName

    #Paths
    #FBX_ExportRelativePath = bpy.path.relpath("//UE Assets")
    FBX_ExportRelativePath = bpy.path.abspath("//UE4 Assets/")
    FBX_Animation_Path = FBX_ExportRelativePath+"/Camera_Animations"
    FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath)

    if FBXRelativeFolderSelector == True:
        Folder = FBX_Animation_Path
        if not exists(FBX_ExportRelativePath):
            mkdir(FBX_ExportRelativePath)
        if not exists(FBX_Animation_Path):
            mkdir(FBX_Animation_Path)
    if FBXRelativeFolderSelector == False:
        Folder = FBX_ExportCustom

    #Export FBX
    bpy.ops.export_scene.fbx(check_existing=True,
        filepath= Folder + '/'+ FBX_name +'.fbx',
        filter_glob="*.fbx",
        version='BIN7400',
        use_selection=True,
        apply_unit_scale=True,
        axis_forward=FBX_AxisForward,
        axis_up=FBX_AxisUp,
        bake_space_transform=False,
        object_types = {'CAMERA'},
        add_leaf_bones=False,
        bake_anim=True,
        bake_anim_use_all_bones =False,
        bake_anim_use_nla_strips=False,
        bake_anim_use_all_actions=False,
        bake_anim_step=FBX_AnimStep,
        bake_anim_simplify_factor=FBX_AnimSimplify,
        use_anim=True,
        path_mode='AUTO',
        embed_textures=False,
        batch_mode='OFF',
        use_batch_own_dir=False,
        use_metadata=True)

    print ("Export OK")

def UE_Export_Animation(self,context):

    #Get A list of objects parented to the selected armature
    ArmChildrenList = bpy.context.object.children
    BonesList = bpy.context.object.pose.bones
    BonesListEdit = bpy.context.object.data.edit_bones
    ob = bpy.context.object
armature = ob.data objProps = bpy.context.object FakeAction=bpy.context.object.animation_data.action ArmatureGroups = bpy.context.active_object.pose.bone_groups Armature_Rotated=False animationFrames= bpy.context.object.animation_data.action.frame_range[1] bpy.context.scene.frame_end = animationFrames #Store bones with groups for export if bpy.context.active_object.type == 'ARMATURE': bpy.ops.object.mode_set( mode='POSE' ) bpy.ops.pose.select_all(action='DESELECT') DeformBonesList=[] EpicExtraBonesList=[] if "DeformBones" in ArmatureGroups: pb_group = ob.pose.bone_groups['DeformBones'] # the pose bone group we wanna select for bone in BonesList: if bone.bone_group == pb_group: DeformBonesList.append(bone.name) if "EpicExtra" in ArmatureGroups: pbe_group = ob.pose.bone_groups['EpicExtra'] # the pose bone group we wanna select for bone in BonesList: if bone.bone_group == pbe_group: EpicExtraBonesList.append(bone.name) #Separate Bones bpy.ops.object.mode_set( mode='EDIT' ) bpy.ops.armature.select_all(action='DESELECT') for bone in BonesListEdit: if bone.name in DeformBonesList: bone.use_deform = True elif bone.name in EpicExtraBonesList: bone.use_deform = True else: bone.use_deform = False bpy.ops.object.mode_set( mode='OBJECT' ) #Export Armature Animation FBX_Export_BakedAnimation(self,context) #for bone in BonesListEdit: #bone.use_deform = True del DeformBonesList[:] del EpicExtraBonesList[:] def hideIKArmsOFF(): BonesList = bpy.context.object.pose.bones ob = bpy.context.object for bone in BonesList: if bone.bone_group_index == 3: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = False if bone.bone_group_index == 5: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = False def hideIKArmsON(): BonesList = bpy.context.object.pose.bones ob = bpy.context.object for bone in BonesList: if bone.bone_group_index == 3: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = True if bone.bone_group_index == 5: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = True def hideIKlegOFF(): BonesList = bpy.context.object.pose.bones ob = bpy.context.object for bone in BonesList: if bone.bone_group_index == 2: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = False if bone.bone_group_index == 4: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = False def hideIKlegON(): BonesList = bpy.context.object.pose.bones ob = bpy.context.object for bone in BonesList: if bone.bone_group_index == 2: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = True if bone.bone_group_index == 4: bonename = ob.data.bones[bone.name] ob.data.bones[bone.name].hide = True #------------------------------------------- #-----------------BUTTONS------------------- #------------------------------------------- #Export Camera Animation class UEExportCamera(bpy.types.Operator): """UE Export Camera Button""" bl_idname = "ue.export_camera" bl_label = "Export Camera Animation" def execute (self, context): bpy.ops.transform.rotate(value=1.5708, axis=(-0.143126, -0.0365628, 0.989029), constraint_axis=(False, True, False), constraint_orientation='LOCAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1) FBX_Export_CameraAnimation(self,context) bpy.ops.transform.rotate(value=-1.5708, axis=(-0.143126, -0.0365625, 0.989029), constraint_axis=(False, True, False), constraint_orientation='LOCAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1) return 
{'FINISHED'} #Set UE Scale button class UEScaleOperator(bpy.types.Operator): """UE Scale Operator Button""" bl_idname = "ue.scale_operator" bl_label = "Set UE Scale" def execute (self, context): scn = context.scene unit = context.scene.unit_settings #Set unit and scale lenght unit.system = 'METRIC' unit.scale_length = 0.01 context.space_data.clip_start = 0.1 context.space_data.clip_end = 1000000.0 print (unit.system) print (unit.scale_length) #Scale objects if selected if ObjScale == True: bpy.ops.view3d.snap_cursor_to_center() bpy.context.space_data.pivot_point = 'CURSOR' bpy.ops.transform.resize(value=(100, 100, 100)) bpy.ops.object.transform_apply(location=False, rotation=False, scale=True) bpy.context.space_data.pivot_point = 'MEDIAN_POINT' else: print ("scale objects is not selected,only will be set the scene scale") return {'FINISHED'} #Set Collision Pivots button class SetCollisionPivots_Button(bpy.types.Operator): """Button Set the pivot poit on collision objects""" bl_idname = "ue.setcollpivots_button" bl_label = "Set Collision Pivots" def execute (self, context): #Create group group = "CollisionPivotgroup" if group in bpy.data.groups: print ("Group already created,will be removed and created again") bpy.data.groups["CollisionPivotgroup"].user_clear() bpy.data.groups.remove(bpy.data.groups["CollisionPivotgroup"]) bpy.ops.group.create(name="CollisionPivotgroup") else: bpy.ops.group.create(name="CollisionPivotgroup") ActionGroup = bpy.data.groups["CollisionPivotgroup"] bpy.ops.object.select_all(action='DESELECT') #Group Operation for ob in ActionGroup.objects: print (ob.name) ob.select = True bpy.context.scene.objects.active = ob bpy.ops.view3d.snap_cursor_to_selected() FBX_SelectCollsionObjects (self,context) bpy.ops.object.origin_set(type='ORIGIN_CURSOR') ob.select = False bpy.ops.object.select_all(action='DESELECT') FBX_Make_Only_selectedObjLayer_visible (self,context) bpy.data.groups["CollisionPivotgroup"].user_clear() bpy.data.groups.remove(bpy.data.groups["CollisionPivotgroup"]) return {'FINISHED'} #Rename Button class Rename_Button(bpy.types.Operator): """Button for Rename""" bl_idname = "rename.button" bl_label = "RenameButton" @classmethod def poll(cls, context): return context.selected_objects != None def execute (self, context): scn = context.scene Get_custom_rename("String: ", 'rename_custom', scn) #Get_custom_prefix("String: ", 'rename_prefix', scn) Rename_detablocks(self, context) return {'FINISHED'} #Init button class InitUEToolsButton(bpy.types.Operator): """Init Main Properties""" bl_idname = "ue.init_button" bl_label = "InitButton" def execute (self, context): Main_UI_Properties(bpy.context.scene) SetObjScale(bpy.context.scene) Rename_Properties(bpy.context.scene) FBX_Export_Properties(bpy.context.scene) Animation_UI_Properties(bpy.context.scene) global Init Init = True return {'FINISHED'} #FBX Export Actions def FBX_Export_actions(self,context): if FBX_PivotToCenter == True: global FBX_name_multi scn = context.scene sufix = 0 #Create group group = "exportgroup" if group in bpy.data.groups: print ("Group already created") else: bpy.ops.group.create(name="exportgroup") ActionGroup = bpy.data.groups["exportgroup"] bpy.ops.object.select_all(action='DESELECT') Get_Custom_ExportName("String: ", 'FBX_Custom_Export_Path', scn) #Group Operation for ob in ActionGroup.objects: print(ob.name) ob.select = True bpy.context.scene.objects.active = ob if FBXBaseNameSelector == "Object": FBX_name_multi = ob.name if FBXBaseNameSelector == "Custom": FBX_name_multi = 
FBX_CustomExportName + "_" + str(sufix) #Store initial position obStartPosX = ob.location[0] obStartPosY = ob.location[1] obStartPosZ = ob.location[2] if FBX_ExportCollision == False: print("Collision Not Exported") if FBX_ExportCollision == True: FBX_SelectCollsionObjects (self,context) #move object to center bpy.ops.view3d.snap_cursor_to_center() bpy.ops.view3d.snap_selected_to_cursor(use_offset=False) #ob.location = (0,0,0) #Export FBX_Export(self,context) #Move to initial position ob.location = (obStartPosX,obStartPosY,obStartPosZ) bpy.ops.view3d.snap_cursor_to_active() bpy.ops.view3d.snap_selected_to_cursor(use_offset=False) ob.select = False if FBX_ExportCollision == False: print("Collision Not Exported") if FBX_ExportCollision == True: #FBX_SelectCollsionObjects (self,context) bpy.ops.object.select_all(action='DESELECT') FBX_Make_Only_selectedObjLayer_visible (self,context) sufix = sufix +1 bpy.data.groups["exportgroup"].user_clear() bpy.data.groups.remove(bpy.data.groups["exportgroup"]) #print("pivotOK") if FBX_PivotToCenter == False: FBX_Export(self,context) print("Export normally") #FBX Export button class FBX_ExportButton(bpy.types.Operator): """Button for Fbx Export""" bl_idname = "ue.export_fbx_button" bl_label = "ExportFbxButton" def execute (self, context): ActualPath = dirname(bpy.data.filepath) if FBXRelativeFolderSelector == True: if ActualPath == "": self.report({'ERROR'}, "You need Save the file for use save automatically into a relative folder") else: FBX_Export_actions(self,context) else: FBX_Export_actions(self,context) #print("test OK") return {'FINISHED'} #Choose Action Buttons class Action_buttons(bpy.types.Operator): """Select Action For bake the Animations""" bl_idname = "ue.action_change" bl_label = "Actions" act = bpy.props.StringProperty() def execute(self, context): print(self.act) bpy.context.active_object.animation_data.action = bpy.data.actions[self.act] return {'FINISHED'} #Delete Action Buttons class Delete_Action_buttons(bpy.types.Operator): """Delete actoin From List""" bl_idname = "ue.action_delete" bl_label = "Actions Delete" actdel = bpy.props.StringProperty() def execute(self, context): #Remove the new created animation action from the RIG armature bpy.context.object.animation_data.action = None actions = bpy.data.actions for action in actions: if action.name == self.actdel: bpy.data.actions[self.actdel].user_clear() for action in actions: if action.users == 0 : bpy.data.actions.remove(action) return {'FINISHED'} def ExportIKAnimation_proces(self,context): FakeAction=bpy.context.object.animation_data.action if bpy.context.object.animation_data.action != None: UE_Export_Animation(self,context) bpy.context.object.animation_data.action = FakeAction for action in bpy.data.actions: if action.users == 0 : action.user_clear() bpy.data.actions.remove(action) else: self.report({'ERROR'}, "The armature must have an action asigned") #Export IK Animation class Export_IK_animation(bpy.types.Operator): """Bake the animation from the helper bones to the deform bones and export animation""" bl_idname = "ue_export_anim.button" bl_label = "Export Animation" def execute (self, context): #global Rotate_Armature if FBXRelativeFolderSelector == True: ActualPath = dirname(bpy.data.filepath) if ActualPath == "": self.report({'ERROR'}, "You need Save the file for use save automatically into a relative folder") else: #Rotate_Armature = self.Rotate_Armature_180 ExportIKAnimation_proces(self,context) else: #Rotate_Armature = self.Rotate_Armature_180 
ExportIKAnimation_proces(self,context) return {'FINISHED'} #Bake And Export All Animations def ExportAllAnims_proces(self,context): ActionList=bpy.data.actions FakeAction=bpy.context.object.animation_data.action BonesList = bpy.context.object.pose.bones #global Rotate_Armature for action in ActionList: if action.use_fake_user == True: bpy.context.object.animation_data.action = action UE_Export_Animation(self,context) bpy.context.object.animation_data.action = None bpy.ops.object.mode_set( mode='POSE' ) for bone in BonesList: bpy.ops.pose.loc_clear() bpy.ops.pose.rot_clear() bpy.ops.object.mode_set( mode='OBJECT' ) bpy.context.object.animation_data.action = FakeAction for action in ActionList: if action.users == 0 : action.user_clear() bpy.data.actions.remove(action) class ExportAllAnims(bpy.types.Operator): """bake and export all animations with Fake User""" bl_idname = "ue_export_all.button" bl_label = "Export All Animations" def execute (self, context): if FBXRelativeFolderSelector == True: ActualPath = dirname(bpy.data.filepath) if ActualPath == "": self.report({'ERROR'}, "You need Save the file for use save automatically into a relative folder") else: ExportAllAnims_proces(self,context) else: ExportAllAnims_proces(self,context) return {'FINISHED'} #Append Hero button class AppendHeroTPP(bpy.types.Operator): """Append The Hero Character and the Rig""" bl_idname = "ue.append_hero" bl_label = "Append Hero" Custom_RIG_name = StringProperty(name="Custom Name",update = RIG_Name_Callback) Include_Hero_value = BoolProperty(name="Include Hero Mesh?") Include_LowRes = BoolProperty(name="Movile version?") def execute (self, context): Include_hero = self.Include_Hero_value RIG_name= self.Custom_RIG_name HeroLow = self.Include_LowRes #Grab the ctive layer before the operation ActiveLayer = bpy.context.scene.layers.data.active_layer #ScriptName = bpy.data.texts['ue_tools_v1-2.py'].name #ScriptPath = bpy.data.texts['ue_tools_v1-2.py'].filepath ScriptDirectory = os.path.dirname(os.path.realpath(__file__)) #bpy.data.texts['ue_tools_v1-2.py'].filepath.strip(ScriptName) BlendFileName = "UE4_Mannequinn_Template.blend" TemplatePath = os.path.join(ScriptDirectory, BlendFileName, "Object", "SK_MannequinMesh") TemplatePathLow = os.path.join(ScriptDirectory, BlendFileName, "Object", "SK_Mannequin_Mobile") TemplateDirectory = os.path.join(ScriptDirectory, BlendFileName, "Object", "") RIG_Armature_name = RIG_name RIG_Mesh_name = RIG_name + "_MESH" if bpy.data.objects.get(RIG_Armature_name) is not None: self.report({'ERROR'}, "Please Give an unique name to the New RIG you already have one "+RIG_name+" on the scene") else: if Include_hero == True: if HeroLow == False: bpy.ops.wm.link(filepath= TemplatePath, directory= TemplateDirectory, filename="SK_MannequinMesh", link=True, relative_path=True, autoselect=True, active_layer=True) bpy.ops.object.make_local(type='ALL') bpy.context.scene.objects.active = bpy.data.objects["SK_MannequinMesh"] bpy.data.objects['SK_MannequinMesh'].select = True bpy.data.objects['SK_MannequinMesh'].name = RIG_Mesh_name bpy.ops.object.select_all(action='DESELECT') bpy.context.scene.objects.active = bpy.data.objects["HeroTPP_Character"] bpy.data.objects['HeroTPP_Character'].select = True bpy.data.objects['HeroTPP_Character'].name = RIG_Armature_name bpy.ops.object.mode_set( mode='POSE' ) if HeroLow == True: bpy.ops.wm.link(filepath= TemplatePathLow, directory= TemplateDirectory, filename="SK_Mannequin_Mobile", link=True, relative_path=True, autoselect=True, active_layer=True) 
bpy.ops.object.make_local(type='ALL') bpy.context.scene.objects.active = bpy.data.objects["SK_Mannequin_Mobile"] bpy.data.objects['SK_Mannequin_Mobile'].select = True bpy.data.objects['SK_Mannequin_Mobile'].name = RIG_Mesh_name bpy.ops.object.select_all(action='DESELECT') bpy.context.scene.objects.active = bpy.data.objects["HeroTPP_Character"] bpy.data.objects['HeroTPP_Character'].select = True bpy.ops.object.delete() bpy.context.scene.objects.active = bpy.data.objects["HeroTPP_Character_Mobile"] bpy.data.objects['HeroTPP_Character_Mobile'].select = True bpy.data.objects['HeroTPP_Character_Mobile'].name = RIG_Armature_name bpy.ops.object.mode_set( mode='POSE' ) else: bpy.ops.wm.link(filepath= TemplatePath, directory= TemplateDirectory, filename="SK_MannequinMesh", link=True, relative_path=True, autoselect=True, active_layer=True) bpy.ops.object.make_local(type='ALL') bpy.context.scene.objects.active = bpy.data.objects["SK_MannequinMesh"] bpy.data.objects['SK_MannequinMesh'].select = True bpy.ops.object.delete() bpy.context.scene.objects.active = bpy.data.objects["HeroTPP_Character"] bpy.data.objects['HeroTPP_Character'].select = True bpy.data.objects['HeroTPP_Character'].name = RIG_Armature_name bpy.ops.object.mode_set( mode='POSE' ) return {'FINISHED'} def invoke(self, context, event): global RIG_name, Include_hero , HeroLow self.Custom_RIG_name = RIG_name self.Include_Hero_value = Include_hero self.Include_LowRes = HeroLow return context.window_manager.invoke_props_dialog(self) return {'FINISHED'} def UE_ExportCharacter(self,context): #Get A list of objects parented to the selected armature ArmChildrenList = bpy.context.object.children BonesList = bpy.context.object.pose.bones BonesListEdit = bpy.context.object.data.edit_bones ob = bpy.context.object armature = ob.data objProps = bpy.context.object ArmatureGroups = bpy.context.active_object.pose.bone_groups #Store bones with groups for export if bpy.context.active_object.type == 'ARMATURE': bpy.ops.object.mode_set( mode='POSE' ) bpy.ops.pose.select_all(action='DESELECT') DeformBonesList=[] EpicExtraBonesList=[] if "DeformBones" in ArmatureGroups: pb_group = ob.pose.bone_groups['DeformBones'] # the pose bone group we wanna select for bone in BonesList: if bone.bone_group == pb_group: DeformBonesList.append(bone.name) if "EpicExtra" in ArmatureGroups: pbe_group = ob.pose.bone_groups['EpicExtra'] # the pose bone group we wanna select for bone in BonesList: if bone.bone_group == pbe_group: EpicExtraBonesList.append(bone.name) #Separate Bones bpy.ops.object.mode_set( mode='EDIT' ) bpy.ops.armature.select_all(action='DESELECT') for bone in BonesListEdit: if bone.name in DeformBonesList: bone.use_deform = True elif bone.name in EpicExtraBonesList: bone.use_deform = True else: bone.use_deform = False bpy.ops.object.mode_set( mode='OBJECT' ) #Export armature and child objects (No animation) FBX_Export_Character(self,context) #for bone in BonesListEdit: #bone.use_deform = True del DeformBonesList[:] del EpicExtraBonesList[:] #Export Character class UE_Export_Character(bpy.types.Operator): """Export Character""" bl_idname = "ue_export_character.button" bl_label = "Export Character" def execute (self, context): if FBXRelativeFolderSelector == True: ActualPath = dirname(bpy.data.filepath) if ActualPath == "": self.report({'ERROR'}, "You need Save the file for use save automatically into a relative folder") else: UE_ExportCharacter(self,context) else: UE_ExportCharacter(self,context) return {'FINISHED'} #Set Deform Bones Group (for no standar 
skeletons) class UE_Set_Deform_Bones(bpy.types.Operator): """Set Deform Bones for no standar skeletons""" bl_idname = "ue_set_deform_bones.button" bl_label = "Set Deform Bones for no Hero RIG skeletons" def execute (self, context): BoneList=bpy.context.object.data.bones BonesSelected = [] for bone in BoneList: if bone.select==True: BonesSelected.append(bone.name) if BonesSelected != []: bpy.ops.pose.group_assign(type=0) bpy.context.object.pose.bone_groups['Group'].name = "DeformBones" else: self.report({'ERROR'}, "You need select some bones") print("You need select some bone") return {'FINISHED'} #Go to pose mode for set the deform bones (for no standar skeletons) class UE_Set_POSE_mode(bpy.types.Operator): """Set pose mode for no standar skeletons""" bl_idname = "ue_set_podemode.button" bl_label = "Set Pose mode for no Hero RIG skeletons" def execute (self, context): global Steep1 bpy.ops.object.mode_set( mode='POSE' ) Steep1=True return {'FINISHED'} #Create Automatically DeformBones Gorup class UE_AutomaticBoneGroup_button(bpy.types.Operator): """Create the "DeformBones" group automatically""" bl_idname = "ue.deformbone_create" bl_label = "Deform Bones" def execute(self, context): ArmChildrenList = bpy.context.object.children BonesListEdit = bpy.context.object.data.edit_bones BoneList = bpy.context.object.pose.bones armObject = bpy.context.scene.objects.active for child in ArmChildrenList: bpy.data.objects[child.name].select = True nonZero = [] for child in ArmChildrenList: if child.type == 'MESH': for vert in child.data.vertices: # Get a list of the non-zero group weightings for the vertex for g in vert.groups: g.weight = round(g.weight, 4) if g.weight > .0000: if g.group not in nonZero: nonZero.append(g.group) nonZeroNames = [] BonesSelected = [] for child in ArmChildrenList: if child.type == 'MESH': vertexGroups = bpy.data.objects[child.name].vertex_groups for group in vertexGroups: gName=group.name gIndex=group.index if gIndex in nonZero: if gName not in nonZeroNames: nonZeroNames.append(gName) bpy.ops.object.select_all(action='DESELECT') bpy.data.objects[armObject.name].select = True bpy.context.scene.objects.active = bpy.data.objects[armObject.name] bpy.ops.object.mode_set( mode='POSE' ) #Store visible bone layers Bonelayers = bpy.context.object.data.layers VisibleBoneLayers=[] for layer in Bonelayers: if layer == True: VisibleBoneLayers.append(True) else: VisibleBoneLayers.append(False) #Enable All bone layers for n in range(0,32): bpy.context.object.data.layers[n] = True #Deselect All bones bpy.ops.pose.select_all(action='DESELECT') #Reselect the bones BonesSelected=[] for b in BoneList: if b.name in nonZeroNames: b.bone.select=True BonesSelected.append(b.name) #Asign The group if BonesSelected != []: bpy.ops.pose.group_assign() bpy.context.object.pose.bone_groups['Group'].name = "DeformBones" else: self.report({'ERROR'}, "Any bones have vertex associated") #Restore Visible Layers i=0 for n in range(0,32): bpy.context.object.data.layers[n] = VisibleBoneLayers[i] i=i+1 bpy.ops.object.mode_set( mode='OBJECT' ) return {'FINISHED'} #New action Button class UE_New_Action_Button(bpy.types.Operator): """Create a new Action""" bl_idname = "ue.action_new_button" bl_label = "New Action" Custom_ACT_name = StringProperty(name="Custom Action",update = ACT_Name_Callback) def execute (self, context): ACT_name= self.Custom_ACT_name print(ACT_name) bpy.data.actions.new(ACT_name) bpy.data.actions[ACT_name].use_fake_user = True ob = bpy.context.active_object if ob.animation_data == None: 
bpy.context.active_object.animation_data_create() ob.animation_data.action = bpy.data.actions[ACT_name] return {'FINISHED'} def invoke(self, context, event): global ACT_name self.Custom_ACT_name = ACT_name return context.window_manager.invoke_props_dialog(self) return {'FINISHED'} # RIG Props class UE_Rig_Props(bpy.types.Operator): """Set the value for the props on the Hero RIG""" bl_idname = "ue_rig_props.button" bl_label = "Set Propeties" RigProp = bpy.props.StringProperty() def execute (self, context): print(self.RigProp) #bpy.context.object["Constraints_ON_OFF"] = 1 if bpy.context.object[self.RigProp] == 0: bpy.context.object[self.RigProp] = 1 else: bpy.context.object[self.RigProp] = 0 # "Buttos CAllbacks" for IK True if self.RigProp == "IKMAIN": if bpy.context.object["IKMAIN"]==1: bpy.context.object["IKARMS"]=1 bpy.context.object["IKLEGS"]=1 bpy.context.object["Ik hand R Lock"]=1 bpy.context.object["Ik Hand L Lock"]=1 bpy.context.object["Ik Arm R"]=1.0 bpy.context.object["IK Arm L"]=1.0 bpy.context.object["Foot Lock L"]=1 bpy.context.object["Foot Lock R"]=1 bpy.context.object["Ik Leg L"]=1.0 bpy.context.object["Ik Leg R"]=1.0 hideIKArmsOFF() hideIKlegOFF() if bpy.context.object["IKMAIN"]==0: bpy.context.object["IKARMS"]=0 bpy.context.object["IKLEGS"]=0 bpy.context.object["Ik hand R Lock"]=0 bpy.context.object["Ik Hand L Lock"]=0 bpy.context.object["Ik Arm R"]=0.0 bpy.context.object["IK Arm L"]=0.0 bpy.context.object["Foot Lock L"]=0 bpy.context.object["Foot Lock R"]=0 bpy.context.object["Ik Leg L"]=0.0 bpy.context.object["Ik Leg R"]=0.0 hideIKArmsON() hideIKlegON() # "Buttos CAllbacks" for IK ARMS True if self.RigProp == "IKARMS": if bpy.context.object["IKARMS"]==1: bpy.context.object["Ik hand R Lock"]=1 bpy.context.object["Ik Hand L Lock"]=1 bpy.context.object["Ik Arm R"]=1.0 bpy.context.object["IK Arm L"]=1.0 hideIKArmsOFF() if bpy.context.object["IKARMS"]==0: bpy.context.object["Ik hand R Lock"]=0 bpy.context.object["Ik Hand L Lock"]=0 bpy.context.object["Ik Arm R"]=0.0 bpy.context.object["IK Arm L"]=0.0 hideIKArmsON() # "Buttos CAllbacks" for IK LEGS True if self.RigProp == "IKLEGS": if bpy.context.object["IKLEGS"]==1: bpy.context.object["Foot Lock L"]=1 bpy.context.object["Foot Lock R"]=1 bpy.context.object["Ik Leg L"]=1.0 bpy.context.object["Ik Leg R"]=1.0 hideIKlegOFF() if bpy.context.object["IKLEGS"]==0: bpy.context.object["Foot Lock L"]=0 bpy.context.object["Foot Lock R"]=0 bpy.context.object["Ik Leg L"]=0.0 bpy.context.object["Ik Leg R"]=0.0 hideIKlegON() return {'FINISHED'} #------------------------------------------- #------------------PANEL-------------------- #------------------------------------------- class Mainpanel(bpy.types.Panel): """A Custom Panel in the Viewport Toolbar""" bl_label = "UE4 Tools" bl_space_type = 'VIEW_3D' bl_region_type = 'TOOLS' #bl_category = 'Tools' bl_category = 'UE4 Tools' @classmethod def poll (self,context): return (bpy.context.mode == 'OBJECT' or 'POSE') def draw(self, context): scn = context.scene rs = bpy.context.scene if Init == False: layout = self.layout row = layout.row() col = row.column(align=True) col.operator ( "ue.init_button", text= "Open UE4 Tools",icon='VISIBLE_IPO_ON') if Init == True: #Main Buttons layout = self.layout row = layout.row() col = row.column(align=True) #col.operator ( "tests.button", text= "Test Button") col.separator() col.prop (scn, 'UESceneTools', text="Scene Tools",icon= 'SCENE_DATA') col.prop (scn, 'UERenameTools', text="Rename Tools",icon= 'GREASEPENCIL') col.prop (scn, 'UEAnimationTools', 
text="Animation Tools",icon= 'POSE_HLT') col.prop (scn, 'UEExportTools', text="Export Tools",icon= 'EXPORT') if (UE_SceneTools == True): #Scene Settings layout = self.layout box = layout.box() box.label ("Scene Settings",icon="SCENE_DATA") row = box.row() col = row.column(align=True) col.operator("ue.scale_operator", text="Set UE Scale", icon='ZOOM_SELECTED') col.prop(scn, 'UEObjScale') col.separator() col.label ("Additional Tools",icon="HELP") col.operator("ue.setcollpivots_button", text="Set Collision Pivots",icon='INLINK') if (UE_RenameTools == True): #Rename Settings box = layout.box() box.label ("Batch Rename Options",icon="SORTALPHA") box2 = box.box() row = box2.row() row.label(text='Base Name:') row = box2.row() row.prop(scn,'naming_base', expand=True) col = box2.column() col.prop(scn,'rename_custom') box3 = box.box() col = box3.column() col.label('Datablocks to rename:') col.prop(scn, 'rename_object') col.prop(scn, 'rename_data') col.prop(scn, 'rename_material') box4 = box.box() col= box4.column() col.label ("Prefix?") col.prop (scn, 'IsCollision', text= "Collider") col.prop(scn, 'rename_use_prefix', text="Custom Prefix") col.prop(scn, 'rename_prefix',text= "Custom") box4.label ("Where to add?") row = box4.row() row.prop(scn, 'prefix_object') row.prop(scn,'prefix_data') row.prop(scn, 'prefix_material') row = box.row(align = True) row.operator ("rename.button", text="Rename", icon='GREASEPENCIL') row = layout.row() if (UE_AnimationTools == True): box = layout.box() box.label ("Animation Tools",icon="POSE_HLT") col = box.column() #Button For append The character if context.mode == 'OBJECT': col.operator ( "ue.append_hero", text= "Append Hero RIG!",icon='VISIBLE_IPO_ON' ) #Check if I have selected object in context(prevent error if change layer) if bpy.context.selected_objects != []: # DO all this only if active object is an ARMATURE if bpy.context.active_object.type == 'CAMERA': col.operator ("ue.export_camera",text ="Export Camera Animation",icon='FORWARD') row=box.row() row.prop (scn, 'UE_Show_Export_options') if UE_ShowRigExport == True: box14=box.box() col=box14.column() col.prop (scn,'FBX_Show_Axis') col.prop (scn,'FBX_Anim_Step') col.prop (scn,'FBX_Anim_Simplify') if FBX_ShowAxis == True: col.prop (scn,'FBX_Axis_Forward') col.prop (scn,'FBX_Axis_Up') #name settings box6 = box.box() col= box6.column() row=box6.row(align=True) col.label(text='FBX Name:') row.prop(scn,'FBX_base_name', expand=True) col.prop(scn,'FBX_Export_Custom_Name',text = "Custom Name") #Folder settings box14 = box.box() col= box14.column() col.label ("Export Directory:") col.prop(scn,'FBX_Relative_Assets_Folder',text= "Relative: UE Assets") col.prop(scn,"FBX_Custom_Export_Path" ,text = "Custom Path") if bpy.context.active_object.type == 'ARMATURE': objProps = bpy.context.object ArmatureGroups = bpy.context.active_object.pose.bone_groups if context.mode == 'OBJECT': #Tools For bake Animations box8 = box.box() col = box8.column() col.label ("Export Anim Tools") if "DeformBones" in ArmatureGroups: col.operator ("ue_export_character.button",text = "Export Character", icon='FORWARD') col.operator ( "ue_export_anim.button", text= "Export Animation", icon='FORWARD') col.operator ( "ue_export_all.button" ,text = "Export All Animations", icon='FORWARD') row=box.row() row.prop (scn, 'UE_Show_Export_options') if UE_ShowRigExport == True: box14=box.box() col=box14.column() col.prop (scn, 'FBX_Smoothing') col.prop (scn,'FBX_Show_Axis') col.prop (scn,'FBX_Anim_Step') col.prop (scn,'FBX_Anim_Simplify') if FBX_ShowAxis == 
True: col.prop (scn,'FBX_Axis_Forward') col.prop (scn,'FBX_Axis_Up') #name settings box6 = box.box() col= box6.column() row=box6.row(align=True) col.label(text='FBX Name:') row.prop(scn,'FBX_base_name', expand=True) col.prop(scn,'FBX_Export_Custom_Name',text = "Custom Name") #Folder settings box14 = box.box() col= box14.column() col.label ("Export Directory:") col.prop(scn,'FBX_Relative_Assets_Folder',text= "Relative: UE Assets") col.prop(scn,"FBX_Custom_Export_Path" ,text = "Custom Path") else: col.alignment='CENTER' col.label("Steep 1:",icon='INFO') col.label("For use this tools this") col.label("armature must have a bone") col.label("group called 'DeformBones'.") col.operator( "ue_set_podemode.button", text= "Manual Creation") col.operator( "ue.deformbone_create", text= "Auto Creation!") if context.mode == 'POSE': if "HeroRIG" not in objProps: if Steep1 == 1: if "DeformBones" not in ArmatureGroups: box8 = box.box() col = box8.column() col.label("Steep 2:",icon='INFO') col.label("Select The bones you") col.label("want to Export and ") col.label("press the button below") col.operator ("ue_set_deform_bones.button", text = "Set Deform Bones") if "HeroRIG" in objProps: row=box.row() #row.prop(scn,'UE_Show_Rig_Props',text="Show Rig Options") if bpy.context.object["ShowRiGoptions"]==0: row.operator("ue_rig_props.button", text="Show RIG Options",icon='CHECKBOX_DEHLT').RigProp="ShowRiGoptions" if bpy.context.object["ShowRiGoptions"]==1: row.operator("ue_rig_props.button", text="Show RIG Options",icon='CHECKBOX_HLT').RigProp="ShowRiGoptions" if bpy.context.object["ShowRiGoptions"]==1: box9 = box.box() box9.label("Rig Options") row=box9.row() if bpy.context.object["ShowAdvancedProps"]==0: row.operator("ue_rig_props.button", text="Advanced Options",icon='CHECKBOX_DEHLT').RigProp="ShowAdvancedProps" if bpy.context.object["ShowAdvancedProps"]==1: row.operator("ue_rig_props.button", text="Advanced Options",icon='CHECKBOX_HLT').RigProp="ShowAdvancedProps" row=box9.row() if bpy.context.object.show_x_ray == True: row.prop (context.object, "show_x_ray",text="X Ray", expand=True,icon='RESTRICT_VIEW_OFF') else: row.prop (context.object, "show_x_ray",text="X Ray", expand=True,icon='RESTRICT_VIEW_ON') if bpy.context.object.data.show_names == True: row.prop (context.object.data, "show_names",text="Names", expand=True,icon='RESTRICT_VIEW_OFF') else: row.prop (context.object.data, "show_names",text="Names", expand=True,icon='RESTRICT_VIEW_ON') if bpy.context.object.data.show_axes == True: row.prop (context.object.data, "show_axes",text="Axes", expand=True,icon='RESTRICT_VIEW_OFF') else: row.prop (context.object.data, "show_axes",text="Axes", expand=True,icon='RESTRICT_VIEW_ON') row=box9.row() #row.prop(scn,'UE_ShowAdvanced_Rig_Prop',text= "Show Advanced Options?") row = box9.row(align=True) row.prop(context.active_object.data, 'layers', index=0, toggle=True, text='Deform Bones',icon='BONE_DATA') row.prop(context.active_object.data, 'layers', index=2, toggle=True, text='Helper Bones',icon='POSE_DATA') #Show Constraints Button row = box9.row() if bpy.context.object["Constraints_ON_OFF"]==0: row.operator("ue_rig_props.button", text="Constraints ON",icon='LINKED').RigProp="Constraints_ON_OFF" if bpy.context.object["Constraints_ON_OFF"]==1: row.operator("ue_rig_props.button", text="Constraints OFF",icon='UNLINKED').RigProp="Constraints_ON_OFF" #Show IK Swith Button row = box9.row(align=True) #row.prop(scn,'UEAnimationIK_ONOFF',icon= 'CONSTRAINT') if bpy.context.object["IKMAIN"]==0: row.operator("ue_rig_props.button", 
text="IK OFF",icon='UNLINKED').RigProp="IKMAIN" if bpy.context.object["IKMAIN"]==1: row.operator("ue_rig_props.button", text="IK ON",icon='LINKED').RigProp="IKMAIN" #Show IK arms and legs Buttons if bpy.context.object["IKMAIN"]==1: row = box9.row() #Show Buttons for arms if bpy.context.object["IKARMS"]==0: row.operator("ue_rig_props.button", text="IK Arms",icon='RESTRICT_VIEW_ON').RigProp="IKARMS" if bpy.context.object["IKARMS"]==1: row.operator("ue_rig_props.button", text="IK Arms",icon='RESTRICT_VIEW_OFF').RigProp="IKARMS" #Show buttons for legs if bpy.context.object["IKLEGS"]==0: row.operator("ue_rig_props.button", text="IK Legs",icon='RESTRICT_VIEW_ON').RigProp="IKLEGS" if bpy.context.object["IKLEGS"]==1: row.operator("ue_rig_props.button", text="IK Legs",icon='RESTRICT_VIEW_OFF').RigProp="IKLEGS" #SHow IK value Bars if bpy.context.object["IKARMS"]==1: row = box9.row() row.prop (context.object, '["Ik Arm R"]', slider = True) row = box9.row() row.prop (context.object, '["IK Arm L"]', slider = True) if bpy.context.object["IKLEGS"]==1: row = box9.row() row.prop (context.object, '["Ik Leg R"]', slider = True) row = box9.row() row.prop (context.object, '["Ik Leg L"]', slider = True) #Show Ik Loks if bpy.context.object["IKARMS"]==1: #Show button for Lock R hand row = box9.row() if bpy.context.object["Ik hand R Lock"]==0: row.operator("ue_rig_props.button", text="Hand R",icon='UNLOCKED').RigProp="Ik hand R Lock" if bpy.context.object["Ik hand R Lock"]==1: row.operator("ue_rig_props.button", text="Hand R",icon='LOCKED').RigProp="Ik hand R Lock" #Show button for Lock L hand if bpy.context.object["Ik Hand L Lock"]==0: row.operator("ue_rig_props.button", text="Hand L",icon='UNLOCKED').RigProp="Ik Hand L Lock" if bpy.context.object["Ik Hand L Lock"]==1: row.operator("ue_rig_props.button", text="Hand L",icon='LOCKED').RigProp="Ik Hand L Lock" #Show Slider for animate hand lock if bpy.context.object["ShowAdvancedProps"]==1: row = box9.row() row.prop (context.object, '["Ik hand R Lock"]',text="Hand R", slider = True) row.prop (context.object, '["Ik Hand L Lock"]',text="Hand L", slider = True) if bpy.context.object["IKLEGS"]==1: #Show button for Lock R Foot row = box9.row() if bpy.context.object["Foot Lock R"]==0: row.operator("ue_rig_props.button", text="Foot R",icon='UNLOCKED').RigProp="Foot Lock R" if bpy.context.object["Foot Lock R"]==1: row.operator("ue_rig_props.button", text="Foot R",icon='LOCKED').RigProp="Foot Lock R" #Show button for Lock L Foot if bpy.context.object["Foot Lock L"]==0: row.operator("ue_rig_props.button", text="Foot R",icon='UNLOCKED').RigProp="Foot Lock L" if bpy.context.object["Foot Lock L"]==1: row.operator("ue_rig_props.button", text="Foot R",icon='LOCKED').RigProp="Foot Lock L" #Show Slider for animate hand lock if bpy.context.object["ShowAdvancedProps"]==1: row = box9.row() row.prop (context.object, '["Foot Lock R"]',text="Hand R", slider = True) row.prop (context.object, '["Foot Lock L"]',text="Hand L", slider = True) row=box9.row() row.label("Inherit Rotation:",icon='GROUP_BONE') row=box9.row() if bpy.context.object["Head inherit Rotation"]==0: row.operator("ue_rig_props.button", text="Head",icon='CHECKBOX_DEHLT',emboss=False).RigProp="Head inherit Rotation" if bpy.context.object["Head inherit Rotation"]==1: row.operator("ue_rig_props.button", text="Head",icon='CHECKBOX_HLT',emboss=False).RigProp="Head inherit Rotation" if bpy.context.object["Arms inherit Rotation"]==0: row.operator("ue_rig_props.button", text="Arms",icon='CHECKBOX_DEHLT',emboss=False).RigProp="Arms 
inherit Rotation" if bpy.context.object["Arms inherit Rotation"]==1: row.operator("ue_rig_props.button", text="Arms",icon='CHECKBOX_HLT',emboss=False).RigProp="Arms inherit Rotation" if bpy.context.object["Waist Inherit Rotation"]==0: row.operator("ue_rig_props.button", text="Waist",icon='CHECKBOX_DEHLT',emboss=False).RigProp="Waist Inherit Rotation" if bpy.context.object["Waist Inherit Rotation"]==1: row.operator("ue_rig_props.button", text="Waist",icon='CHECKBOX_HLT', emboss=False).RigProp="Waist Inherit Rotation" row=box9.row() if bpy.context.object["ShowAdvancedProps"]==1: row.prop (context.object, '["Head inherit Rotation"]',text="Head inherit rotation", slider = True) row.prop (context.object, '["Arms inherit Rotation"]',text="Arms inherit rotation", slider = True) row.prop (context.object, '["Waist Inherit Rotation"]',text="Wais inherit rotation", slider = True) #DIsplay The Faked Actions stored on Data actions = bpy.data.actions box = layout.box() box.label("Available Actions", icon='ACTION') row=box.row() row=box.row(align=True) row.alignment = 'RIGHT' row.operator ("ue.action_new_button",icon='ZOOMIN') box12=box.box() row=box12.row() col = row.column() for action in actions: if action.use_fake_user == True: col.operator("ue.action_change", text=action.name,).act=action.name col = row.column() for action in actions: if action.use_fake_user == True: col.operator("ue.action_delete",icon='X', text="").actdel=action.name if (UE_ExportTools == True): #FBX Export Settings layout = self.layout box = layout.box() box.label ("Export Tools",icon="EXPORT") #General settings box5 = box.box() col = box5.column() row = box5.row() #row.prop (scn, 'FBX_AssetTypeSelector', expand=True) row.label ("FBX Settings:") col = box5.column() col.prop (scn, 'FBX_Pivot_to_Center') col.prop (scn,'FBX_Export_Collision_Obj',text= "Export collision") col.prop (scn,'FBX_Show_Axis') if FBX_ShowAxis == True: col.prop (scn,'FBX_Axis_Forward') col.prop (scn,'FBX_Axis_Up') col.prop (scn, 'FBX_Smoothing') #name settings box6 = box.box() row = box6.row() row.label(text='FBX Name:') row = box6.row() row.prop(scn,'FBX_base_name', expand=True) col = box6.column() col.prop(scn,'FBX_Export_Custom_Name',text = "Custom Name") #Folder settings box7 = box.box() col = box7.column() col.label ("Export Directory:") col.prop(scn,'FBX_Relative_Assets_Folder',text= "Relative: UE4 Assets") col.prop(scn,"FBX_Custom_Export_Path" ,text = "Custom Path") col = box.column() col.operator ( "ue.export_fbx_button", text= "FBX Export",icon='FORWARD') #------------------------------------------- #-----------------REGISTER------------------ #------------------------------------------- classes = [ SetCollisionPivots_Button, FBX_ExportButton, InitUEToolsButton, Rename_Button, UE_Export_Character, UEScaleOperator, AppendHeroTPP, Mainpanel, Export_IK_animation, ExportAllAnims, Action_buttons, UE_Set_Deform_Bones, UE_Set_POSE_mode, UE_Rig_Props, UE_New_Action_Button, Delete_Action_buttons, UE_AutomaticBoneGroup_button, UEExportCamera ] def register(): for c in classes: bpy.utils.register_class(c) def unregister(): for c in classes: bpy.utils.unregister_class(c) if __name__ == "__main__": register()
[ "# ##### BEGIN GPL LICENSE BLOCK #####\n", "#\n", "# This program is free software; you can redistribute it and/or\n", "# modify it under the terms of the GNU General Public License\n", "# as published by the Free Software Foundation; either version 2\n", "# of the License, or (at your option) any later version.\n", "#\n", "# This program is distributed in the hope that it will be useful,\n", "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n", "# GNU General Public License for more details.\n", "#\n", "# You should have received a copy of the GNU General Public License\n", "# along with this program; if not, write to the Free Software Foundation,\n", "# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n", "#\n", "# ##### END GPL LICENSE BLOCK #####\n", "\n", "bl_info = {\n", " \"name\": \"UE4 Tools\",\n", " \"author\": \"LluisGarcia3D\",\n", " \"version\": (1, 2, 4),\n", " \"blender\": (2, 7, 5),\n", " \"location\": \"View3D > Tools > UE4 Tools\",\n", " \"description\": \"Adds some tools for improve the blender to unreal engine workflow\",\n", " \"warning\": \"\",\n", " \"wiki_url\": \"http://www.lluisgarcia.es/ue-tools-addon/\",\n", " \"category\": \"UE4 Tools\"}\n", "\n", "\n", "\n", "import bpy\n", "from bpy.types import Scene\n", "from bpy.props import *\n", "from bpy.props import FloatVectorProperty\n", "from bpy.types import Operator, AddonPreferences, Panel, Menu\n", "import os\n", "from os import mkdir, listdir\n", "from os.path import dirname, exists, join\n", "from mathutils import Matrix\n", "from bpy.props import BoolProperty\n", "from bpy.props import *\n", "from bpy.app.handlers import persistent\n", "from math import radians\n", "\n", "@persistent\n", "def load_handler(dummy):\n", " #print(\"Load Handler:\", bpy.data.filepath)\n", " \n", " Main_UI_Properties(bpy.context.scene)\n", " SetObjScale(bpy.context.scene)\n", " Rename_Properties(bpy.context.scene)\n", " FBX_Export_Properties(bpy.context.scene)\n", " Animation_UI_Properties(bpy.context.scene)\n", " \n", "bpy.app.handlers.load_post.append(load_handler) \n", "\n", "\n", "\n", "#-------------------------------------------\n", "#----------------VARIABLES------------------\n", "#-------------------------------------------\n", "#UI Display Var\n", "\n", "Init = False\n", "\n", "UE_SceneTools = False\n", "UE_RenameTools = False\n", "UE_ExportTools = False\n", "UE_AnimationTools = False\n", "\n", "\n", "#Scene Settings Var\n", "Init = False\n", "ObjScale = False\n", "testvar = 0\n", "\n", "\n", "#Batch Rename Vars\n", "ObjBaseNameSelector = \"Custom\"\n", "RenameObjects = True\n", "RenameData = True\n", "RenameMaterials = False\n", "PrefixSelector = False\n", "PrefixObj = True\n", "PrefixData = True\n", "PrefixMat = False\n", "CustomRename = \"\"\n", "Prefix=\"\"\n", "name=\"\"\n", "Collision= False\n", "\n", "\n", "#Animation Vars\n", "\n", "CustomShapesList = []\n", "UE_IK_HeadInherit = False\n", "UE_IK_WaistInherit = False\n", "UE_IK_ONOFF = True\n", "UE_IK_ArmsInherit = False\n", "UE_IK_DeformBones = False\n", "UE_IK_helperBones = True\n", "UE_IK_Lock_R_hand = True\n", "UE_IK_Lock_L_hand = True\n", "UE_IK_Lock_R_foot = True\n", "UE_IK_Lock_L_foot = True\n", "\n", "\n", "\n", "UE_IK_Leg = True\n", "UE_IK_Arm = True\n", "UE_IK_Showbones = False\n", "UE_IK_ShowDeformbones = False\n", "UE_ShowAdvancedRigProp = False\n", "UE_ShowRigProps=True\n", "UE_ShowRigExport=False\n", "\n", "\n", "RIG_name = \"HeroTPP\"\n", 
"Include_hero = True\n", "HeroLow = False\n", "Rotate_character=False\n", "Rotate_Armature = False\n", "UE_Custom_RIG_Name = \"\"\n", "ACT_name = \"New action\" \n", "FakeActionName=\"\"\n", "Steep1=False\n", "\n", "\n", "#FBX Export Vars\n", "\n", "FBX_name_multi = \"\"\n", "FBXBaseNameSelector = \"Object\"\n", "FBX_ExportCustomPath = \"\"\n", "FBXRelativeFolderSelector = True\n", "FBX_CustomExportName=\"\"\n", "FBX_AssetType = \"STATIC\"\n", "FBX_Format = \"BIN7400\"\n", "FBX_ExportCollision= False\n", "FBX_Global_scale = 1\n", "FBX_Tangent=True\n", "FBX_Bake_Anim = False\n", "FBX_NLA = False\n", "FBX_AllActions = False\n", "FBX_AnimStep = 1\n", "FBX_AnimSimplify = 1\n", "FBX_UseAnim = False\n", "FBX_AllActions61 = False\n", "FBX_AxisForward='-Z'\n", "FBX_AxisUp = 'Y'\n", "FBX_ShowAxis = False\n", "FBX_PivotToCenter = False\n", "FBX_Smoothing = 0\n", "FBX_smooth_Type= \"OFF\"\n", "FBXSmoothingType = 'OFF'\n", "\n", "\n", "#-------------------------------------------\n", "#---------------UI Callbacks----------------\n", "#-------------------------------------------\n", "#scene tools Active?\n", "def UE_Scene_Tools_Callback (scene,context): \n", " \n", " global UE_SceneTools\n", " UE_SceneTools = scene.UESceneTools\n", " \n", " \n", "#Rename tools Active?\n", "def UE_Rename_Tools_Callback (scene,context): \n", " \n", " global UE_RenameTools\n", " UE_RenameTools = scene.UERenameTools\n", " \n", " \n", "#Export tools Active?\n", "def UE_Export_Tools_Callback (scene,context): \n", " \n", " global UE_ExportTools\n", " UE_ExportTools = scene.UEExportTools\n", " \n", "\n", "#Animation tools Active?\n", "def UE_Animation_Tools_Callback (scene,context): \n", " \n", " global UE_AnimationTools\n", " UE_AnimationTools = scene.UEAnimationTools\n", "\n", "#-------------------------------------------\n", "#-----------------UI PROPS------------------\n", "#-------------------------------------------\n", "def Main_UI_Properties(scn):\n", " #Scene Tools \n", " bpy.types.Scene.UESceneTools = BoolProperty(\n", " name = \"Rename Data\",\n", " default=False,\n", " update = UE_Scene_Tools_Callback, \n", " description = \"Activate the Scene tools\")\n", " scn['UESceneTools'] = False\n", " \n", " #Rename Tools \n", " bpy.types.Scene.UERenameTools = BoolProperty(\n", " name = \"Rename Data\",\n", " default=False,\n", " update = UE_Rename_Tools_Callback, \n", " description = \"Activate Rename tools\")\n", " scn['UERenameTools'] = False\n", "\n", " #Export Tools\n", " bpy.types.Scene.UEExportTools = BoolProperty(\n", " name = \"Rename Data\",\n", " default=False,\n", " update = UE_Export_Tools_Callback, \n", " description = \"Activate Export tools\")\n", " scn['UEExportTools'] = False\n", " \n", " #Animation Tools\n", " bpy.types.Scene.UEAnimationTools = BoolProperty(\n", " name = \"Rename Data\",\n", " default=False,\n", " update = UE_Animation_Tools_Callback, \n", " description = \"Activate Animation tools\")\n", " scn['UEAnimationTools'] = False\n", "\n", "\n", "#-------------------------------------------\n", "#--------STORE SCENE SETTINGS PROPS---------\n", "#-------------------------------------------\n", "\n", "#---------------------\n", "#Props Callbacks -----\n", "#---------------------\n", "\n", "def ObjScale_Callback (scene,context): \n", " \n", " global ObjScale\n", " ObjScale = scene.UEObjScale \n", " print (ObjScale)\n", "\n", "\n", "#---------------------\n", "#Props ---------------\n", "#---------------------\n", "def SetObjScale(scn): \n", " \n", " bpy.types.Scene.UEObjScale = BoolProperty(\n", " 
name = \"Scale Selected Objects\",update = ObjScale_Callback,\n", " description = \"True or False?\") \n", " scn['UEObjScale'] = ObjScale\n", " return\n", "\n", "\n", "#-------------------------------------------\n", "#---------SCENE SETTINGS FUNCTIONS----------\n", "#-------------------------------------------\n", "\n", "\n", "\n", "\n", "#-------------------------------------------\n", "#---------BATCH RENAME CALLBACKS-----------\n", "#-------------------------------------------\n", "\n", "#base name callback\n", "def Obj_Base_Name_Selector_Callback (scene,context): \n", " \n", " global ObjBaseNameSelector\n", " ObjBaseNameSelector = scene.naming_base \n", " print (\"Base name = \" + str(ObjBaseNameSelector)) \n", " \n", "\n", "#Rename object selector callback\n", "def Rename_Objects_Callback (scene,context):\n", " \n", " global RenameObjects \n", " RenameObjects = scene.rename_object \n", " print (\"Rename Objects = \" + str(RenameObjects))\n", "\n", "#Rename Data selector callback \n", "def Rename_Data_Callback (scene,context):\n", " \n", " global RenameData \n", " RenameData = scene.rename_data \n", " print (\"Rename Data = \" + str(RenameData))\n", "\n", "#Rename Materials selector callback \n", "def Rename_materials_Callback (scene,context):\n", " \n", " global RenameMaterials \n", " RenameMaterials = scene.rename_material \n", " print (\"Rename Materials = \" + str(RenameMaterials))\n", "\n", "#Add Prefix selector callback\n", "\n", "def Prefix_selector_Callback (scene,context):\n", " \n", " global PrefixSelector \n", " PrefixSelector = scene.rename_use_prefix \n", " print (\"Add Prefix = \" + str(PrefixSelector)) \n", "\n", "#Add Prefix to objects callback\n", "def Prefix_objects_Callback (scene,context):\n", " \n", " global PrefixObj \n", " PrefixObj = scene.prefix_object\n", " print (\"Add Prefix to objects = \" + str(PrefixObj)) \n", "\n", "#Add Prefix to Data callback\n", "def Prefix_data_Callback (scene,context):\n", " \n", " global PrefixData \n", " PrefixData = scene.prefix_data \n", " print (\"Add Prefix to data = \" + str(PrefixData)) \n", " \n", "#Add Prefix to Materials callback\n", "def Prefix_materials_Callback (scene,context):\n", " \n", " global PrefixMat \n", " PrefixMat = scene.prefix_material \n", " print (\"Add Prefix to materials = \" + str(PrefixMat))\n", " \n", "\n", "\n", "#Is Collision callback\n", "def Collision_Callback (scene,context):\n", " \n", " global Collision \n", " Collision = scene.IsCollision \n", " print (\"is a collisoin object = \" + str(Collision))\n", "\n", "\n", "\n", "#-------------------------------------------\n", "#---------BATCH RENAME PROPERTIES-----------\n", "#-------------------------------------------\n", "\n", "def Rename_Properties(scn):\n", " \n", " #name origins\n", " name_origins = [\n", " ('Custom', 'Custom', 'Custom'),\n", " ('Object', 'Object', 'Object'),\n", " ('Mesh', 'Mesh', 'Mesh'),\n", " ('Material', 'Material', 'Material')\n", " ]\n", " \n", " #naming base\n", " bpy.types.Scene.naming_base = EnumProperty(\n", " items = name_origins, \n", " name = \"Name Used:\",\n", " default = \"Custom\",\n", " update = Obj_Base_Name_Selector_Callback,\n", " description='Base name for rename')\n", " scn['naming_base'] = 0\n", " \n", " #custom name\n", " bpy.types.Scene.rename_custom = StringProperty(\n", " name = \"Custom Name\",\n", " default='New Name',\n", " description='Rename all with this String')\n", " scn['rename_custom'] = \"\"\n", " \n", " #Data to rename\n", " \n", " #objects? 
\n", " bpy.types.Scene.rename_object = BoolProperty(\n", " name = \"Rename Objects\",\n", " default=True,\n", " update = Rename_Objects_Callback, \n", " description = \"Rename Objects\")\n", " scn['rename_object'] = True\n", " \n", " #objects data? \n", " bpy.types.Scene.rename_data = BoolProperty(\n", " name = \"Rename Data\",\n", " default=True,\n", " update = Rename_Data_Callback, \n", " description = \"Rename Object\\'s Data\")\n", " scn['rename_data'] = True\n", " \n", " #materials? \n", " bpy.types.Scene.rename_material = BoolProperty(\n", " name = \"Rename Materials\",\n", " default=False,\n", " update = Rename_materials_Callback, \n", " description = \"Rename Objects\\' Materials\")\n", " scn['rename_material'] = False\n", " \n", " \n", " #Prefix selector\n", " bpy.types.Scene.rename_use_prefix = BoolProperty(\n", " name = \"Add Prefix\",\n", " default=False,\n", " update = Prefix_selector_Callback, \n", " description = \"Prefix Object,data or material names\")\n", " scn['rename_use_prefix'] = False\n", " \n", " #Custom Prefix\n", " bpy.types.Scene.rename_prefix = StringProperty(\n", " name = \"Prefix\",\n", " default='',\n", " description='Prefix name with this string')\n", " scn['rename_prefix'] = \"\"\n", " \n", " #Prefix on Objects?\n", " bpy.types.Scene.prefix_object = BoolProperty(\n", " name = \"Object\",\n", " default=True,\n", " update = Prefix_objects_Callback, \n", " description = \"Prefix Objects names\")\n", " scn['prefix_object'] = True\n", " \n", " #Prefix on Data?\n", " bpy.types.Scene.prefix_data = BoolProperty(\n", " name = \"Data\",\n", " default=True,\n", " update = Prefix_data_Callback, \n", " description = \"Prefix Data Names\")\n", " scn['prefix_data'] = True\n", " \n", " #Prefix on Materials?\n", " bpy.types.Scene.prefix_material = BoolProperty(\n", " name = \"Material\",\n", " default=False,\n", " update = Prefix_materials_Callback, \n", " description = \"Prefix Material Names\")\n", " scn['prefix_material'] = False\n", " \n", " #Is collision?\n", " bpy.types.Scene.IsCollision = BoolProperty(\n", " name = \"Is Collision\",\n", " default=False,\n", " update = Collision_Callback, \n", " description = \"If Checked, add the collision prefix\")\n", " scn['IsCollision'] = False\n", " \n", "\n", " \n", "\n", "\n", "\n", "\n", "#-------------------------------------------\n", "#---------BATCH RENAME FUNCTIONS------------\n", "#-------------------------------------------\n", "\n", "#Get custom rename\n", "def Get_custom_rename(label, key, scn): \n", " global CustomRename \n", " CustomRename = scn[\"rename_custom\"]\n", " \n", "#Get custom Prefix\n", "def Get_custom_prefix(label, key, scn): \n", " global Prefix \n", " Prefix = scn[\"rename_prefix\"]\n", " \n", "\n", "#Get firts Material\n", "def get_first_material_name(ob): \n", " for m_slot in ob.material_slots:\n", " if m_slot.material:\n", " material_name = m_slot.material.name\n", " return material_name\n", " \n", "\n", "\n", " \n", "#main Function for rename \n", "def Rename_detablocks(self, context):\n", " \n", " obs = bpy.context.selected_objects\n", " sufix = 0\n", " global PrefixSelector\n", " global Prefix\n", " \n", " scn = context.scene\n", " \n", " Get_custom_prefix(\"String: \", 'rename_prefix', scn)\n", " \n", " \n", " \n", " for ob in obs:\n", " \n", " #Get Base Names \n", " if ObjBaseNameSelector == 'Object':\n", " name = ob.name\n", " \n", " if ObjBaseNameSelector == 'Mesh':\n", " if ob.data:\n", " name = ob.data.name \n", " else:\n", " name = ob.name\n", " \n", " if ObjBaseNameSelector == 
'Material': \n", " material_name = get_first_material_name(ob) \n", " if not material_name:\n", " name = ob.name\n", " else:\n", " name = material_name\n", " \n", " if ObjBaseNameSelector == 'Custom': \n", " name = CustomRename\n", " \n", " if Collision == True:\n", " Prefix = \"UCX\"\n", " PrefixSelector = True\n", " \n", " if PrefixSelector == True:\n", " print (Prefix) \n", " \n", " \n", " #Rename objects Names\n", " if RenameObjects == True:\n", " if (PrefixSelector == True\n", " and PrefixObj == True): \n", " ob.name = Prefix +\"_\"+ name\n", " print (ob.name) \n", " else:\n", " ob.name = name\n", " if (PrefixSelector == True\n", " and PrefixObj == True\n", " and Collision == True):\n", " ob.name = Prefix +\"_\"+ name +\"_\"+ str(sufix)\n", " \n", " #else:\n", " #ob.name = name\n", " \n", " \n", " \n", " #Rename objects data Names \n", " if RenameData == True:\n", " if (ob.data\n", " and ob.data.users == 1): \n", " if (PrefixSelector == True\n", " and PrefixData == True):\n", " ob.data.name = Prefix +\"_\"+ name\n", " else:\n", " ob.data.name = name\n", " \n", " if (PrefixSelector == True\n", " and PrefixData == True\n", " and Collision == True):\n", " ob.data.name = Prefix +\"_\"+ name +\"_\"+ str(sufix)\n", " #else:\n", " #ob.data.name = name \n", " \n", " \n", " #Rename material Names \n", " if RenameMaterials == True:\n", " if ob.material_slots:\n", " for m_slot in ob.material_slots:\n", " if m_slot.material:\n", " if m_slot.material.users == 1:\n", " if (PrefixSelector == True\n", " and PrefixMat == True):\n", " m_slot.material.name = Prefix +\"_\"+ name\n", " #else:\n", " #m_slot.material.name = name\n", " sufix = sufix + 1\n", " \n", " \n", "#-------------------------------------------\n", "#------------FBX EXPORT CALLBACKS-----------\n", "#-------------------------------------------\n", "\n", "def FBX_Show_Axis_Callback (scene,context):\n", " \n", " global FBX_ShowAxis\n", " FBX_ShowAxis = scene.FBX_Show_Axis\n", " print (\"Base name = \" + str(FBX_ShowAxis))\n", " \n", "\n", "def FBX_Axis_Forward_Callback (scene,context):\n", " \n", " global FBX_AxisForward\n", " FBX_AxisForward = scene.FBX_Axis_Forward\n", " print(FBX_AxisForward)\n", " \n", "def FBX_Axis_Up_Callback (scene,context):\n", " \n", " global FBX_AxisUp\n", " FBX_AxisUp = scene.FBX_Axis_Up\n", " print(FBX_AxisUp)\n", "\n", "def FBX_Smoothing_Selector_Callback (scene,context):\n", " \n", " global FBXSmoothingType\n", " FBXSmoothingType = scene.FBX_Smoothing\n", " print (str(FBXSmoothingType))\n", " \n", "def FBX_Base_Name_Selector_Callback (scene,context):\n", " \n", " global FBXBaseNameSelector\n", " FBXBaseNameSelector = scene.FBX_base_name \n", " print (\"Base name = \" + str(FBXBaseNameSelector)) \n", " \n", "def FBX_Relative_Assets_Folder_Callback (scene,context): \n", " \n", " global FBXRelativeFolderSelector\n", " FBXRelativeFolderSelector = scene.FBX_Relative_Assets_Folder \n", " print (\"Base name = \" + str(FBXRelativeFolderSelector))\n", " \n", " \n", "def FBX_Export_Collision_Callback (scene,context): \n", " \n", " global FBX_ExportCollision\n", " FBX_ExportCollision = scene.FBX_Export_Collision_Obj\n", " print (\"Base name = \" + str(FBX_ExportCollision))\n", " \n", " \n", "def FBX_TangentSpace_Callback (scene,context): \n", " \n", " global FBX_Tangent\n", " FBX_Tangent = scene.FBX_TangentSpace\n", " print (\"Base name = \" + str(FBX_Tangent))\n", " \n", " \n", "def FBX_BakeAnim_Callback (scene,context): \n", " \n", " global FBX_Bake_Anim\n", " FBX_Bake_Anim = scene.FBX_BakeAnim\n", " print (\"Base 
name = \" + str(FBX_Bake_Anim))\n", " \n", "def FBX_NLA_Callback (scene,context): \n", " \n", " global FBX_NLA\n", " FBX_NLA = scene.FBX_Use_NLA\n", " print (\"Base name = \" + str(FBX_NLA))\n", "\n", "def FBX_All_Actions_Callback (scene,context): \n", " \n", " global FBX_AllActions\n", " FBX_AllActions = scene.FBX_All_Actions\n", " print (\"Base name = \" + str(FBX_AllActions))\n", " \n", "def FBX_Anim_Steep_Callback (scene,context): \n", " \n", " global FBX_AnimStep\n", " FBX_AnimStep = scene.FBX_Anim_Step\n", " print (\"Base name = \" + str(FBX_AnimStep))\n", " \n", "def FBX_Anim_Simplify_Callback (scene,context): \n", " \n", " global FBX_AnimSimplify\n", " FBX_AnimSimplify = scene.FBX_Anim_Simplify\n", " print (\"Base name = \" + str(FBX_AnimSimplify))\n", " \n", "def FBX_Use_Anim_Callback (scene,context): \n", " \n", " global FBX_UseAnim\n", " FBX_UseAnim = scene.FBX_Use_Anim\n", " print (\"Base name = \" + str(FBX_UseAnim))\n", " \n", "def FBX_All_Actions_61_Callback (scene,context): \n", " \n", " global FBX_AllActions61\n", " FBX_AllActions61 = scene.FBX_All_Actions_61\n", " print (\"Base name = \" + str(FBX_AllActions61))\n", " \n", "def FBX_Pivot_To_Center_Callback (scene,context): \n", " \n", " global FBX_PivotToCenter\n", " FBX_PivotToCenter = scene.FBX_Pivot_to_Center\n", " print (\"Base name = \" + str(FBX_PivotToCenter)) \n", " \n", "\n", " \n", "\n", "#-------------------------------------------\n", "#-----------FBX EXPORT PROPERTIES-----------\n", "#-------------------------------------------\n", "def FBX_Export_Properties(scn):\n", " \n", " #Use Smoothing faces?\n", " \n", " #Smoothing items\n", " FBX_smooth_Type = [\n", " ('OFF', 'OFF', 'OFF'),\n", " ('FACE', 'FACE', 'FACE'),\n", " ('EDGE', 'EDGE' ,'EDGE')\n", " ]\n", " \n", " #Smoothing\n", " bpy.types.Scene.FBX_Smoothing = EnumProperty(\n", " items = FBX_smooth_Type, \n", " name = \"FBX Smooth type used:\",\n", " default = 'OFF',\n", " update = FBX_Smoothing_Selector_Callback,\n", " description='Smoothing type for the objects')\n", " scn['FBX_Smoothing'] = 0 \n", "\n", " #Pivot To center\n", " bpy.types.Scene.FBX_Pivot_to_Center = BoolProperty(\n", " name = \"Pivot To Center\",\n", " default=False,\n", " update = FBX_Pivot_To_Center_Callback, \n", " description = \"Allow to Export objects with the correct pivot point without have the object at the center of the scene\")\n", " scn['FBX_Pivot_to_Center'] = False \n", " \n", " \n", " #name origins\n", " FBX_name_origins = [\n", " ('Object', 'Object', 'Object'),\n", " ('Custom', 'Custom', 'Custom')\n", " ]\n", " \n", " #naming base\n", " bpy.types.Scene.FBX_base_name = EnumProperty(\n", " items = FBX_name_origins, \n", " name = \"FBX Export Name Used:\",\n", " default = \"Object\",\n", " update = FBX_Base_Name_Selector_Callback,\n", " description='Base name for Export as FBX')\n", " scn['FBX_base_name'] = 0\n", " \n", " #Show Axis?\n", " bpy.types.Scene.FBX_Show_Axis = BoolProperty(\n", " name = \"Show Axis\",\n", " default=False,\n", " update = FBX_Show_Axis_Callback, \n", " description = \"Check for show Axis Orientation\")\n", " scn['FBX_Show_Axis'] = False\n", " \n", " \n", " \n", " #Axis_Forward inputs\n", " FBX_Axis_ForwardList = [\n", " ('X', \"X Forward\", \"\"),\n", " ('Y', \"Y Forward\", \"\"),\n", " ('Z', \"Z Forward\", \"\"),\n", " ('-X', \"-X Forward\", \"\"),\n", " ('-Y', \"-Y Forward\", \"\"),\n", " ('-Z', \"-Z Forward\", \"\")\n", " ]\n", " \n", " #Axis Fordware\n", " bpy.types.Scene.FBX_Axis_Forward = EnumProperty(\n", " items = FBX_Axis_ForwardList, \n", " 
name = \"Forward\",\n", " default = '-Z',\n", " update = FBX_Axis_Forward_Callback,\n", " description='Set the Forward Axis')\n", " scn['FBX_Axis_Forward'] = 5\n", " \n", " #Axis_Up inputs\n", " FBX_Axis_UpList = [\n", " ('X', \"X Up\", \"\"),\n", " ('Y', \"Y Up\", \"\"),\n", " ('Z', \"Z Up\", \"\"),\n", " ('-X', \"-X Up\", \"\"),\n", " ('-Y', \"-Y Up\", \"\"),\n", " ('-Z', \"-Z Up\", \"\")\n", " ]\n", " \n", " #Axis Up\n", " bpy.types.Scene.FBX_Axis_Up = EnumProperty(\n", " items = FBX_Axis_UpList, \n", " name = \"Up\",\n", " default = 'Y',\n", " update = FBX_Axis_Up_Callback,\n", " description='Set the Up Axis')\n", " scn['FBX_Axis_Up'] = 1\n", " \n", " \n", " \n", " \n", " \n", " #custom name\n", " bpy.types.Scene.FBX_Export_Custom_Name = StringProperty(\n", " name = \"FBX Custom Name\",\n", " default='', #New Name\n", " description='Export Objects with a custom name')\n", " scn['FBX_Export_Custom_Name'] = \"\"\n", " \n", " #Export To relative path: UE ASsets\n", " bpy.types.Scene.FBX_Relative_Assets_Folder = BoolProperty(\n", " name = \"UE Assets Folder\",\n", " default=True,\n", " update = FBX_Relative_Assets_Folder_Callback, \n", " description = \"Export into relative folder called: UE Assets\")\n", " scn['FBX_Relative_Assets_Folder'] = True \n", " \n", " \n", " #custom Path\n", " bpy.types.Scene.FBX_Custom_Export_Path = StringProperty(\n", " name = \"FBX Custom Folder\",\n", " default='', #Custom Export Folder\n", " description='Export Objects To a custom Path',\n", " subtype = 'DIR_PATH')\n", " scn['FBX_Custom_Export_Path'] = \"\"\n", " \n", "\n", "\n", " \n", " #Export Collision Objects too\n", " bpy.types.Scene.FBX_Export_Collision_Obj = BoolProperty(\n", " name = \"Export Collision Objects\",\n", " default=False,\n", " update = FBX_Export_Collision_Callback, \n", " description = \"Export Collision Objects along selected objects\")\n", " scn['FBX_Export_Collision_Obj'] = False \n", " \n", "\n", " \n", " #Use Tangent Space ?\n", " bpy.types.Scene.FBX_TangentSpace = BoolProperty(\n", " name = \"Export Collision Objects\",\n", " default=True,\n", " update = FBX_TangentSpace_Callback, \n", " description = \"Add binormal and tangent vectors, together with normal they form the tangent space (will only work correctly with tris/quads only meshes!\")\n", " scn['FBX_TangentSpace'] = True\n", " \n", " #Use Bake anim ?\n", " bpy.types.Scene.FBX_BakeAnim = BoolProperty(\n", " name = \"Use Bake\",\n", " default=False,\n", " update = FBX_BakeAnim_Callback, \n", " description = \"Export baked keyframe animation\")\n", " scn['FBX_BakeAnim'] = False\n", " \n", " #Use NLA ?\n", " bpy.types.Scene.FBX_Use_NLA = BoolProperty(\n", " name = \"Use NLA\",\n", " default=False,\n", " update = FBX_NLA_Callback, \n", " description = \"Export each non-muted NLA strip as a separated FBX’s AnimStack, if any, instead of global scene animation\")\n", " scn['FBX_Use_NLA'] = False\n", " \n", " #Use All Actions ?\n", " bpy.types.Scene.FBX_All_Actions = BoolProperty(\n", " name = \"Use All actions\",\n", " default=True,\n", " update = FBX_All_Actions_Callback, \n", " description = \"Export each action as a separated FBX’s AnimStack, instead of global scene animation\")\n", " scn['FBX_All_Actions'] = True\n", " \n", " \n", " #Sampling Rate (anim_step)\n", " bpy.types.Scene.FBX_Anim_Step = FloatProperty(\n", " name = \"Sampling Rate\",\n", " default=1,\n", " min=0.01,\n", " max=100,\n", " update= FBX_Anim_Steep_Callback)\n", " scn['FBX_Anim_Step'] = 1 \n", " \n", " #Anim Simplify \n", " bpy.types.Scene.FBX_Anim_Simplify 
= FloatProperty(\n", " name = \"Simplify\",\n", " default=1,\n", " min=0,\n", " max=10,\n", " update= FBX_Anim_Simplify_Callback)\n", " scn['FBX_Anim_Simplify'] = 1\n", " \n", "\n", " \n", " \n", " \n", " \n", "\n", "#-------------------------------------------\n", "#-----------FBX EXPORT FUNCTIONS------------\n", "#------------------------------------------- \n", "\n", "# Grab values From Custom Path\n", "def Get_Custom_Path(label, key, scn): \n", " global FBX_ExportCustomPath \n", " FBX_ExportCustomPath = scn[\"FBX_Custom_Export_Path\"] \n", " \n", "def Get_Custom_ExportName(label, key, scn): \n", " global FBX_CustomExportName \n", " FBX_CustomExportName = scn[\"FBX_Export_Custom_Name\"]\n", " \n", "\n", "def FBX_SelectCollsionObjects (self,context):\n", " \n", " name = bpy.context.object.name\n", " \n", " obj = bpy.data.objects[name]\n", " activeLayer = bpy.context.scene.active_layer\n", " \n", " # Make visile all layers\n", " scn = bpy.context.scene \n", " for n in range(0,20):\n", " scn.layers[n] = True\n", " \n", " \n", " if FBX_PivotToCenter == False:\n", " obs = bpy.context.selected_objects\n", " for ob in obs:\n", " name = ob.name\n", " bpy.ops.object.select_pattern(extend=True, pattern=\"UCX_\"+name+\"_\"+\"*\", case_sensitive=True)\n", " \n", " if FBX_PivotToCenter == True:\n", " #Extend the selection with All Collisio Objects \n", " bpy.ops.object.select_pattern(extend=True, pattern=\"UCX_\"+name+\"_\"+\"*\", case_sensitive=True) \n", " \n", " \n", " \n", "def FBX_Make_Only_selectedObjLayer_visible (self,context):\n", " \n", " # Make visile only the active object layer\n", " name = bpy.context.object.name \n", " obj = bpy.data.objects[name]\n", " activeLayer = bpy.context.scene.active_layer \n", " \n", " objectLayer = [i for i in range(len(obj.layers)) if obj.layers[i] == True]\n", " for i in range(len(bpy.context.scene.layers)):\n", " if i not in objectLayer: \n", " bpy.context.scene.layers[i] = False\n", " else:\n", " bpy.context.scene.layers[i] = True \n", " if activeLayer not in objectLayer:\n", " bpy.context.scene.layers[activeLayer] = False\n", " \n", " \n", "\n", "\n", "def FBX_Export(self,context): \n", " \n", " global FBX_ExportCustomPath\n", " global FBX_name_multi\n", " \n", " scn = context.scene \n", " \n", " Get_Custom_Path(\"String: \", 'FBX_Custom_Export_Path', scn)\n", " Get_Custom_ExportName(\"String: \", 'FBX_Custom_Export_Path', scn) \n", " \n", " #Get Name \n", " \n", " if FBX_PivotToCenter == True:\n", " \n", " FBX_name = FBX_name_multi \n", " \n", " if FBX_PivotToCenter == False:\n", " \n", " if FBXBaseNameSelector == \"Object\":\n", " FBX_name = bpy.context.object.name\n", " #print(FBX_name)\n", " if FBXBaseNameSelector == \"Custom\":\n", " FBX_name = FBX_CustomExportName\n", " #print(FBX_name) \n", " \n", " #Paths\n", " #FBX_ExportRelativePath = bpy.path.relpath(\"//UE Assets\")\n", " FBX_ExportRelativePath = bpy.path.abspath(\"//UE4 Assets\") \n", " FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath) \n", " \n", " if FBXRelativeFolderSelector == True:\n", " Folder = FBX_ExportRelativePath\n", " if not exists(FBX_ExportRelativePath):\n", " mkdir(FBX_ExportRelativePath)\n", " if FBXRelativeFolderSelector == False:\n", " Folder = FBX_ExportCustom\n", " \n", " #Profiles:\n", " \n", " \n", " if FBX_PivotToCenter == False and FBX_ExportCollision == True:\n", " FBX_SelectCollsionObjects (self,context) \n", " else:\n", " print(\"no collision exported\") \n", " \n", " #Export FBX\n", " bpy.ops.export_scene.fbx(check_existing=True,\n", " filepath= Folder + 
'/'+ FBX_name +'.fbx',\n", " filter_glob=\"*.fbx\",\n", " version='BIN7400',\n", " use_selection=True, \n", " apply_unit_scale=True,\n", " axis_forward=FBX_AxisForward,\n", " axis_up=FBX_AxisUp,\n", " bake_space_transform=True,\n", " object_types= {'MESH'},\n", " use_mesh_modifiers=True,\n", " mesh_smooth_type=FBXSmoothingType, \n", " use_mesh_edges=False, \n", " use_tspace=True,\n", " use_custom_props=True, \n", " path_mode='AUTO',\n", " embed_textures=False, \n", " batch_mode='OFF', \n", " use_batch_own_dir=False,\n", " use_metadata=True) \n", " \n", " if FBX_PivotToCenter == False and FBX_ExportCollision == True:\n", " bpy.ops.object.select_all(action='DESELECT')\n", " FBX_Make_Only_selectedObjLayer_visible (self,context) \n", " else:\n", " print(\"no collision exported\") \n", " \n", " FBX_ExportCustomPath = \"\"\n", " \n", " \n", " \n", " print (\"Export OK\") \n", "\n", "\n", "\n", "#-------------------------------------------\n", "#-----------------RIG FUNCTIONS-----------------\n", "#-------------------------------------------\n", "\n", "#Rig Properties\n", "\n", "\n", "def Animation_UI_Properties(scn): \n", " \n", " #Show Rig Options\n", " bpy.types.Scene.UE_Show_Rig_Props= BoolProperty(\n", " name = \"Show Rig Options\",\n", " default=True,\n", " update = UE_Show_Rig_Props_Callback, \n", " description = \"Show The options for the RIG\")\n", " scn['UE_Show_Rig_Props'] = True \n", " \n", " #Show Rig Export Options\n", " bpy.types.Scene.UE_Show_Export_options= BoolProperty(\n", " name = \"Show Export Options\",\n", " default=False,\n", " update = UE_Show_Export_option_Callback, \n", " description = \"Show Export Options for customize the fbx name,folder and scale\")\n", " scn['UE_Show_Export_options'] = False\n", "\n", " \n", " \n", "#Rig Callbacks UE_ShowAdvanced_Rig_Prop_Callback \n", "\n", "def RIG_Name_Callback (scene,context): \n", " \n", " global UE_Custom_RIG_Name\n", " UE_Custom_RIG_Name= scene.Custom_RIG_name\n", " \n", "def UE_Show_Rig_Props_Callback (scene,context): \n", " \n", " global UE_ShowRigProps\n", " UE_ShowRigProps= scene.UE_Show_Rig_Props\n", " \n", "def UE_Show_Export_option_Callback (scene,context): \n", " \n", " global UE_ShowRigExport\n", " UE_ShowRigExport= scene.UE_Show_Export_options \n", " \n", " \n", "def ACT_Name_Callback (scene,context): \n", " \n", " global UE_Custom_ACT_Name\n", " UE_Custom_ACT_Name= scene.Custom_ACT_name\n", " \n", "\n", "#-------------------------------------------\n", "#-----------------RIG EXPORT-------------------\n", "#-------------------------------------------\n", "\n", "\n", "\n", "def FBX_Export_Character(self,context): \n", " \n", " scn = context.scene \n", " \n", " Get_Custom_Path(\"String: \", 'FBX_Custom_Export_Path', scn)\n", " Get_Custom_ExportName(\"String: \", 'FBX_Custom_Export_Path', scn) \n", " \n", " #Get Name \n", " \n", " if FBXBaseNameSelector == \"Object\":\n", " FBX_name = bpy.context.object.name\n", " \n", " if FBXBaseNameSelector == \"Custom\":\n", " FBX_name = FBX_CustomExportName\n", " \n", " objName=bpy.context.scene.objects.active.name\n", " \n", " \n", " #Paths\n", " #FBX_ExportRelativePath = bpy.path.relpath(\"//UE Assets\")\n", " FBX_ExportRelativePath = bpy.path.abspath(\"//UE4 Assets/\")\n", " FBX_Character_Path = FBX_ExportRelativePath + objName+\"_Character\" \n", " FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath)\n", " \n", " if FBXRelativeFolderSelector == True:\n", " Folder = FBX_Character_Path\n", " if not exists(FBX_ExportRelativePath):\n", " mkdir(FBX_ExportRelativePath)\n", " 
if not exists(FBX_Character_Path):\n", " mkdir(FBX_Character_Path)\n", " if FBXRelativeFolderSelector == False:\n", " Folder = FBX_ExportCustom\n", " \n", " #Export FBX\n", " bpy.ops.export_scene.fbx(check_existing=True,\n", " filepath= Folder + '/'+ FBX_name +'.fbx',\n", " filter_glob=\"*.fbx\",\n", " version='BIN7400',\n", " use_selection=True, \n", " axis_forward=FBX_AxisForward,\n", " axis_up=FBX_AxisUp,\n", " bake_space_transform=False,\n", " apply_unit_scale=True,\n", " object_types={'ARMATURE', 'MESH'},\n", " use_mesh_modifiers=True,\n", " mesh_smooth_type=FBXSmoothingType,\n", " use_mesh_edges=False, \n", " use_tspace=True,\n", " use_custom_props=False,\n", " add_leaf_bones=False,\n", " primary_bone_axis='Y',\n", " secondary_bone_axis='X',\n", " use_armature_deform_only=True,\n", " path_mode='AUTO',\n", " embed_textures=False, \n", " batch_mode='OFF', \n", " use_batch_own_dir=False,\n", " use_metadata=True) \n", " \n", " \n", " print (\"Export OK\") \n", "\n", "\n", "\n", "def FBX_Export_BakedAnimation(self,context): \n", " \n", " scn = context.scene \n", " \n", " Get_Custom_Path(\"String: \", 'FBX_Custom_Export_Path', scn)\n", " Get_Custom_ExportName(\"String: \", 'FBX_Custom_Export_Path', scn) \n", " \n", " #Get Name\n", " \n", " ActionName=bpy.context.active_object.animation_data.action.name \n", " objName=bpy.context.scene.objects.active.name\n", " \n", " \n", " if FBXBaseNameSelector == \"Object\":\n", " FBX_name = bpy.context.object.name + \"_\" +ActionName\n", " \n", " if FBXBaseNameSelector == \"Custom\":\n", " FBX_name = FBX_CustomExportName+ \"_\" +ActionName\n", " \n", " \n", " #Paths\n", " #FBX_ExportRelativePath = bpy.path.relpath(\"//UE Assets\")\n", " FBX_ExportRelativePath = bpy.path.abspath(\"//UE4 Assets/\")\n", " FBX_Character_Path = FBX_ExportRelativePath + objName+\"_Character\"\n", " FBX_Animation_Path = FBX_Character_Path+\"/Animations\" \n", " FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath) \n", " \n", " if FBXRelativeFolderSelector == True:\n", " Folder = FBX_Animation_Path\n", " if not exists(FBX_ExportRelativePath):\n", " mkdir(FBX_ExportRelativePath)\n", " if not exists(FBX_Character_Path):\n", " mkdir(FBX_Character_Path)\n", " if not exists(FBX_Animation_Path):\n", " mkdir(FBX_Animation_Path)\n", " if FBXRelativeFolderSelector == False:\n", " Folder = FBX_ExportCustom\n", " \n", " \n", " #Export FBX\n", " bpy.ops.export_scene.fbx(check_existing=True,\n", " filepath= Folder + '/'+ FBX_name +'.fbx',\n", " filter_glob=\"*.fbx\",\n", " version='BIN7400',\n", " use_selection=True, \n", " apply_unit_scale=True,\n", " axis_forward=FBX_AxisForward,\n", " axis_up=FBX_AxisUp,\n", " bake_space_transform=False,\n", " object_types={'ARMATURE'},\n", " add_leaf_bones=False,\n", " primary_bone_axis='Y',\n", " secondary_bone_axis='X',\n", " use_armature_deform_only=True,\n", " bake_anim=True,\n", " bake_anim_use_all_bones =True,\n", " bake_anim_use_nla_strips=False,\n", " bake_anim_use_all_actions=False,\n", " bake_anim_step=FBX_AnimStep,\n", " bake_anim_simplify_factor=FBX_AnimSimplify,\n", " use_anim=True,\n", " use_anim_action_all=False,\n", " use_default_take=False,\n", " use_anim_optimize=False,\n", " anim_optimize_precision=6.0,\n", " path_mode='AUTO',\n", " embed_textures=False, \n", " batch_mode='OFF', \n", " use_batch_own_dir=False,\n", " use_metadata=True) \n", " \n", " print (\"Export OK\") \n", "\n", "\n", "\n", "def FBX_Export_CameraAnimation(self,context): \n", " \n", " scn = context.scene \n", " \n", " Get_Custom_Path(\"String: \", 
'FBX_Custom_Export_Path', scn)\n", " Get_Custom_ExportName(\"String: \", 'FBX_Custom_Export_Path', scn) \n", " \n", " #Get Name\n", " \n", " #ActionName=bpy.context.active_object.animation_data.action.name \n", " objName=bpy.context.scene.objects.active.name\n", " \n", " \n", " if FBXBaseNameSelector == \"Object\":\n", " FBX_name = bpy.context.object.name #+ \"_\" +ActionName\n", " \n", " if FBXBaseNameSelector == \"Custom\":\n", " FBX_name = FBX_CustomExportName #+ \"_\" +ActionName\n", " \n", " \n", " #Paths\n", " #FBX_ExportRelativePath = bpy.path.relpath(\"//UE Assets\")\n", " FBX_ExportRelativePath = bpy.path.abspath(\"//UE4 Assets/\")\n", " FBX_Animation_Path = FBX_ExportRelativePath+\"/Camera_Animations\" \n", " FBX_ExportCustom = bpy.path.abspath(FBX_ExportCustomPath) \n", " \n", " if FBXRelativeFolderSelector == True:\n", " Folder = FBX_Animation_Path\n", " if not exists(FBX_ExportRelativePath):\n", " mkdir(FBX_ExportRelativePath)\n", " if not exists(FBX_Animation_Path):\n", " mkdir(FBX_Animation_Path)\n", " if FBXRelativeFolderSelector == False:\n", " Folder = FBX_ExportCustom\n", " \n", " \n", " #Export FBX \n", " \n", " bpy.ops.export_scene.fbx(check_existing=True,\n", " filepath= Folder + '/'+ FBX_name +'.fbx',\n", " filter_glob=\"*.fbx\",\n", " version='BIN7400',\n", " use_selection=True, \n", " apply_unit_scale=True,\n", " axis_forward=FBX_AxisForward,\n", " axis_up=FBX_AxisUp,\n", " bake_space_transform=False,\n", " object_types = {'CAMERA'},\n", " add_leaf_bones=False,\n", " bake_anim=True,\n", " bake_anim_use_all_bones =False,\n", " bake_anim_use_nla_strips=False,\n", " bake_anim_use_all_actions=False,\n", " bake_anim_step=FBX_AnimStep,\n", " bake_anim_simplify_factor=FBX_AnimSimplify,\n", " use_anim=True,\n", " path_mode='AUTO',\n", " embed_textures=False, \n", " batch_mode='OFF', \n", " use_batch_own_dir=False,\n", " use_metadata=True) \n", " \n", " \n", " print (\"Export OK\") \n", " \n", "\n", "\n", "def UE_Export_Animation(self,context):\n", " \n", " #Get A list of objects parented to the selected armature\n", " ArmChildrenList = bpy.context.object.children\n", " \n", " \n", " BonesList = bpy.context.object.pose.bones\n", " BonesListEdit = bpy.context.object.data.edit_bones\n", " ob = bpy.context.object \n", " armature = ob.data\n", " objProps = bpy.context.object\n", " \n", " FakeAction=bpy.context.object.animation_data.action\n", " ArmatureGroups = bpy.context.active_object.pose.bone_groups \n", " \n", " Armature_Rotated=False\n", " \n", " \n", " animationFrames= bpy.context.object.animation_data.action.frame_range[1] \n", " bpy.context.scene.frame_end = animationFrames\n", " \n", " #Store bones with groups for export\n", " if bpy.context.active_object.type == 'ARMATURE': \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " bpy.ops.pose.select_all(action='DESELECT')\n", "\n", " \n", " DeformBonesList=[]\n", " EpicExtraBonesList=[]\n", " \n", " if \"DeformBones\" in ArmatureGroups:\n", " pb_group = ob.pose.bone_groups['DeformBones'] # the pose bone group we wanna select\n", " for bone in BonesList:\n", " if bone.bone_group == pb_group:\n", " DeformBonesList.append(bone.name)\n", " \n", " if \"EpicExtra\" in ArmatureGroups:\n", " pbe_group = ob.pose.bone_groups['EpicExtra'] # the pose bone group we wanna select\n", " for bone in BonesList:\n", " if bone.bone_group == pbe_group:\n", " EpicExtraBonesList.append(bone.name)\n", " \n", " #Separate Bones \n", " bpy.ops.object.mode_set( mode='EDIT' )\n", " bpy.ops.armature.select_all(action='DESELECT') \n", " \n", " for bone 
in BonesListEdit: \n", " if bone.name in DeformBonesList:\n", " bone.use_deform = True\n", " elif bone.name in EpicExtraBonesList:\n", " bone.use_deform = True\n", " else:\n", " bone.use_deform = False\n", " \n", " bpy.ops.object.mode_set( mode='OBJECT' )\n", " \n", " #Export Armature Animation \n", " FBX_Export_BakedAnimation(self,context)\n", " \n", " #for bone in BonesListEdit:\n", " #bone.use_deform = True\n", "\n", " \n", " \n", " del DeformBonesList[:]\n", " del EpicExtraBonesList[:]\n", " \n", " \n", "def hideIKArmsOFF(): \n", " \n", " BonesList = bpy.context.object.pose.bones\n", " ob = bpy.context.object\n", " \n", " \n", " for bone in BonesList: \n", " if bone.bone_group_index == 3:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = False\n", " if bone.bone_group_index == 5:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = False\n", " \n", "def hideIKArmsON():\n", " \n", " BonesList = bpy.context.object.pose.bones\n", " ob = bpy.context.object\n", " \n", " \n", " for bone in BonesList: \n", " if bone.bone_group_index == 3:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = True\n", " if bone.bone_group_index == 5:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = True\n", " \n", "def hideIKlegOFF():\n", " \n", " BonesList = bpy.context.object.pose.bones\n", " ob = bpy.context.object\n", " \n", " \n", " for bone in BonesList: \n", " if bone.bone_group_index == 2:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = False\n", " if bone.bone_group_index == 4:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = False\n", " \n", " \n", "def hideIKlegON():\n", " \n", " BonesList = bpy.context.object.pose.bones\n", " ob = bpy.context.object\n", " \n", " \n", " for bone in BonesList: \n", " if bone.bone_group_index == 2:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = True\n", " if bone.bone_group_index == 4:\n", " bonename = ob.data.bones[bone.name]\n", " ob.data.bones[bone.name].hide = True\n", " \n", " \n", "#-------------------------------------------\n", "#-----------------BUTTONS-------------------\n", "#-------------------------------------------\n", "\n", "#Export Camera Animation\n", "\n", "class UEExportCamera(bpy.types.Operator):\n", " \"\"\"UE Export Camera Button\"\"\"\n", " bl_idname = \"ue.export_camera\"\n", " bl_label = \"Export Camera Animation\"\n", " \n", " def execute (self, context):\n", " \n", " bpy.ops.transform.rotate(value=1.5708,\n", " axis=(-0.143126, -0.0365628, 0.989029),\n", " constraint_axis=(False, True, False), \n", " constraint_orientation='LOCAL', \n", " mirror=False, proportional='DISABLED', \n", " proportional_edit_falloff='SMOOTH', \n", " proportional_size=1)\n", " \n", " \n", " FBX_Export_CameraAnimation(self,context)\n", " \n", " bpy.ops.transform.rotate(value=-1.5708,\n", " axis=(-0.143126, -0.0365625, 0.989029),\n", " constraint_axis=(False, True, False), \n", " constraint_orientation='LOCAL', \n", " mirror=False, proportional='DISABLED', \n", " proportional_edit_falloff='SMOOTH', \n", " proportional_size=1)\n", " \n", " return {'FINISHED'}\n", " \n", " \n", "#Set UE Scale button\n", "\n", "class UEScaleOperator(bpy.types.Operator):\n", " \"\"\"UE Scale Operator Button\"\"\"\n", " bl_idname = \"ue.scale_operator\"\n", " bl_label = \"Set UE Scale\"\n", " \n", " def execute (self, context):\n", " \n", " scn = context.scene \n", " \n", " \n", " 
unit = context.scene.unit_settings \n", " \n", " \n", " \n", " #Set unit and scale lenght \n", " unit.system = 'METRIC'\n", " unit.scale_length = 0.01\n", " \n", " context.space_data.clip_start = 0.1\n", " context.space_data.clip_end = 1000000.0\n", " \n", " print (unit.system)\n", " print (unit.scale_length)\n", " \n", " #Scale objects if selected \n", " if ObjScale == True:\n", " \n", " bpy.ops.view3d.snap_cursor_to_center()\n", " bpy.context.space_data.pivot_point = 'CURSOR'\n", " bpy.ops.transform.resize(value=(100, 100, 100))\n", " bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)\n", " bpy.context.space_data.pivot_point = 'MEDIAN_POINT' \n", " \n", " else:\n", " print (\"scale objects is not selected,only will be set the scene scale\") \n", " \n", " return {'FINISHED'}\n", "\n", "#Set Collision Pivots button \n", "class SetCollisionPivots_Button(bpy.types.Operator):\n", " \"\"\"Button Set the pivot poit on collision objects\"\"\"\n", " bl_idname = \"ue.setcollpivots_button\"\n", " bl_label = \"Set Collision Pivots\" \n", " \n", " def execute (self, context):\n", " \n", " #Create group\n", " \n", " \n", " \n", " group = \"CollisionPivotgroup\"\n", " \n", " if group in bpy.data.groups:\n", " print (\"Group already created,will be removed and created again\")\n", " bpy.data.groups[\"CollisionPivotgroup\"].user_clear() \n", " bpy.data.groups.remove(bpy.data.groups[\"CollisionPivotgroup\"])\n", " bpy.ops.group.create(name=\"CollisionPivotgroup\") \n", " else:\n", " bpy.ops.group.create(name=\"CollisionPivotgroup\") \n", " \n", " ActionGroup = bpy.data.groups[\"CollisionPivotgroup\"]\n", " \n", " bpy.ops.object.select_all(action='DESELECT')\n", " \n", " #Group Operation\n", " \n", " for ob in ActionGroup.objects:\n", " \n", " print (ob.name) \n", " ob.select = True\n", " bpy.context.scene.objects.active = ob\n", " bpy.ops.view3d.snap_cursor_to_selected()\n", " FBX_SelectCollsionObjects (self,context)\n", " bpy.ops.object.origin_set(type='ORIGIN_CURSOR')\n", " ob.select = False\n", " bpy.ops.object.select_all(action='DESELECT')\n", " FBX_Make_Only_selectedObjLayer_visible (self,context)\n", " \n", " bpy.data.groups[\"CollisionPivotgroup\"].user_clear() \n", " bpy.data.groups.remove(bpy.data.groups[\"CollisionPivotgroup\"]) \n", " \n", " return {'FINISHED'} \n", " \n", " \n", "#Rename Button\n", "\n", "class Rename_Button(bpy.types.Operator):\n", " \"\"\"Button for Rename\"\"\"\n", " bl_idname = \"rename.button\"\n", " bl_label = \"RenameButton\" \n", " \n", " \n", " @classmethod\n", " def poll(cls, context):\n", " return context.selected_objects != None\n", " \n", " \n", " def execute (self, context):\n", " \n", " scn = context.scene\n", " \n", " Get_custom_rename(\"String: \", 'rename_custom', scn)\n", " #Get_custom_prefix(\"String: \", 'rename_prefix', scn)\n", " \n", " Rename_detablocks(self, context) \n", " \n", " \n", " return {'FINISHED'} \n", " \n", "\n", "#Init button \n", "class InitUEToolsButton(bpy.types.Operator):\n", " \"\"\"Init Main Properties\"\"\"\n", " bl_idname = \"ue.init_button\"\n", " bl_label = \"InitButton\" \n", " \n", " def execute (self, context): \n", " \n", " Main_UI_Properties(bpy.context.scene)\n", " SetObjScale(bpy.context.scene)\n", " Rename_Properties(bpy.context.scene)\n", " FBX_Export_Properties(bpy.context.scene)\n", " Animation_UI_Properties(bpy.context.scene)\n", " \n", " \n", " global Init\n", " Init = True \n", " \n", " return {'FINISHED'} \n", "\n", "#FBX Export Actions\n", "\n", "def FBX_Export_actions(self,context):\n", " 
\n", " if FBX_PivotToCenter == True: \n", " \n", " global FBX_name_multi\n", " \n", " \n", " scn = context.scene \n", " \n", " sufix = 0\n", " \n", " #Create group\n", " \n", " group = \"exportgroup\"\n", " \n", " if group in bpy.data.groups:\n", " print (\"Group already created\")\n", " else:\n", " bpy.ops.group.create(name=\"exportgroup\") \n", " \n", " ActionGroup = bpy.data.groups[\"exportgroup\"]\n", " \n", " bpy.ops.object.select_all(action='DESELECT') \n", " \n", " Get_Custom_ExportName(\"String: \", 'FBX_Custom_Export_Path', scn)\n", " \n", " #Group Operation\n", " \n", " for ob in ActionGroup.objects:\n", " \n", " print(ob.name) \n", " \n", " ob.select = True\n", " bpy.context.scene.objects.active = ob\n", " \n", " if FBXBaseNameSelector == \"Object\":\n", " FBX_name_multi = ob.name\n", " if FBXBaseNameSelector == \"Custom\":\n", " FBX_name_multi = FBX_CustomExportName + \"_\" + str(sufix)\n", " \n", " #Store initial position\n", " obStartPosX = ob.location[0]\n", " obStartPosY = ob.location[1]\n", " obStartPosZ = ob.location[2]\n", " \n", " if FBX_ExportCollision == False:\n", " print(\"Collision Not Exported\")\n", " if FBX_ExportCollision == True:\n", " FBX_SelectCollsionObjects (self,context) \n", " \n", " \n", " #move object to center\n", " bpy.ops.view3d.snap_cursor_to_center()\n", " bpy.ops.view3d.snap_selected_to_cursor(use_offset=False)\n", " #ob.location = (0,0,0) \n", " \n", " #Export \n", " FBX_Export(self,context)\n", " \n", " #Move to initial position\n", " ob.location = (obStartPosX,obStartPosY,obStartPosZ)\n", " bpy.ops.view3d.snap_cursor_to_active()\n", " bpy.ops.view3d.snap_selected_to_cursor(use_offset=False) \n", " \n", " ob.select = False\n", " \n", " if FBX_ExportCollision == False:\n", " print(\"Collision Not Exported\")\n", " if FBX_ExportCollision == True:\n", " #FBX_SelectCollsionObjects (self,context)\n", " bpy.ops.object.select_all(action='DESELECT')\n", " FBX_Make_Only_selectedObjLayer_visible (self,context) \n", " \n", " sufix = sufix +1 \n", " \n", " bpy.data.groups[\"exportgroup\"].user_clear() \n", " bpy.data.groups.remove(bpy.data.groups[\"exportgroup\"]) \n", " \n", " \n", " #print(\"pivotOK\")\n", " \n", " if FBX_PivotToCenter == False:\n", " FBX_Export(self,context)\n", " print(\"Export normally\") \n", "\n", "\n", "\n", "#FBX Export button \n", "class FBX_ExportButton(bpy.types.Operator):\n", " \"\"\"Button for Fbx Export\"\"\"\n", " bl_idname = \"ue.export_fbx_button\"\n", " bl_label = \"ExportFbxButton\" \n", " \n", " \n", " \n", " def execute (self, context): \n", " \n", " ActualPath = dirname(bpy.data.filepath) \n", " \n", " if FBXRelativeFolderSelector == True: \n", " if ActualPath == \"\":\n", " self.report({'ERROR'}, \"You need Save the file for use save automatically into a relative folder\")\n", " else:\n", " FBX_Export_actions(self,context)\n", " else:\n", " FBX_Export_actions(self,context) \n", " \n", " \n", " #print(\"test OK\")\n", " return {'FINISHED'}\n", "\n", "\n", "\n", "\n", "#Choose Action Buttons \n", "class Action_buttons(bpy.types.Operator):\n", " \"\"\"Select Action For bake the Animations\"\"\"\n", " bl_idname = \"ue.action_change\"\n", " bl_label = \"Actions\" \n", " \n", " act = bpy.props.StringProperty() \n", " \n", " def execute(self, context): \n", " \n", " print(self.act) \n", " \n", " bpy.context.active_object.animation_data.action = bpy.data.actions[self.act]\n", " \n", " return {'FINISHED'} \n", "\n", "#Delete Action Buttons \n", "class Delete_Action_buttons(bpy.types.Operator):\n", " \"\"\"Delete actoin From 
List\"\"\"\n", " bl_idname = \"ue.action_delete\"\n", " bl_label = \"Actions Delete\" \n", " \n", " actdel = bpy.props.StringProperty() \n", " \n", " def execute(self, context): \n", " \n", " #Remove the new created animation action from the RIG armature\n", " bpy.context.object.animation_data.action = None\n", " \n", " actions = bpy.data.actions\n", " \n", " for action in actions:\n", " if action.name == self.actdel:\n", " bpy.data.actions[self.actdel].user_clear()\n", " \n", " for action in actions: \n", " if action.users == 0 :\n", " bpy.data.actions.remove(action)\n", " \n", " \n", " \n", " return {'FINISHED'}\n", " \n", "def ExportIKAnimation_proces(self,context):\n", " FakeAction=bpy.context.object.animation_data.action\n", " \n", " \n", " if bpy.context.object.animation_data.action != None:\n", " \n", " \n", " UE_Export_Animation(self,context) \n", " \n", " bpy.context.object.animation_data.action = FakeAction\n", " \n", " for action in bpy.data.actions:\n", " if action.users == 0 :\n", " action.user_clear()\n", " bpy.data.actions.remove(action)\n", " else:\n", " self.report({'ERROR'}, \"The armature must have an action asigned\") \n", " \n", " \n", "\n", "#Export IK Animation \n", "class Export_IK_animation(bpy.types.Operator):\n", " \"\"\"Bake the animation from the helper bones to the deform bones and export animation\"\"\"\n", " bl_idname = \"ue_export_anim.button\"\n", " bl_label = \"Export Animation\" \n", " \n", " \n", " \n", " def execute (self, context):\n", " \n", " #global Rotate_Armature\n", " \n", " if FBXRelativeFolderSelector == True: \n", " ActualPath = dirname(bpy.data.filepath)\n", " \n", " if ActualPath == \"\":\n", " self.report({'ERROR'}, \"You need Save the file for use save automatically into a relative folder\")\n", " else:\n", " #Rotate_Armature = self.Rotate_Armature_180\n", " ExportIKAnimation_proces(self,context)\n", " \n", " else:\n", " #Rotate_Armature = self.Rotate_Armature_180\n", " ExportIKAnimation_proces(self,context) \n", " \n", " \n", " return {'FINISHED'}\n", " \n", " \n", "#Bake And Export All Animations\n", "\n", "def ExportAllAnims_proces(self,context):\n", " \n", " ActionList=bpy.data.actions\n", " FakeAction=bpy.context.object.animation_data.action\n", " BonesList = bpy.context.object.pose.bones \n", " \n", "\n", " \n", " #global Rotate_Armature\n", " \n", " for action in ActionList:\n", " if action.use_fake_user == True:\n", " bpy.context.object.animation_data.action = action\n", " UE_Export_Animation(self,context)\n", " \n", " bpy.context.object.animation_data.action = None\n", " bpy.ops.object.mode_set( mode='POSE' )\n", " for bone in BonesList:\n", " bpy.ops.pose.loc_clear()\n", " bpy.ops.pose.rot_clear()\n", " bpy.ops.object.mode_set( mode='OBJECT' )\n", "\n", " \n", " bpy.context.object.animation_data.action = FakeAction\n", " \n", " for action in ActionList: \n", " if action.users == 0 :\n", " action.user_clear()\n", " bpy.data.actions.remove(action) \n", "\n", "\n", "class ExportAllAnims(bpy.types.Operator):\n", " \"\"\"bake and export all animations with Fake User\"\"\"\n", " bl_idname = \"ue_export_all.button\"\n", " bl_label = \"Export All Animations\" \n", " \n", " \n", " \n", " def execute (self, context):\n", " \n", " \n", " \n", " if FBXRelativeFolderSelector == True:\n", " \n", " ActualPath = dirname(bpy.data.filepath)\n", " \n", " if ActualPath == \"\":\n", " self.report({'ERROR'}, \"You need Save the file for use save automatically into a relative folder\")\n", " else:\n", " \n", " ExportAllAnims_proces(self,context) \n", 
" else:\n", " \n", " ExportAllAnims_proces(self,context) \n", " \n", " return {'FINISHED'}\n", " \n", "\n", "\n", "#Append Hero button\n", "\n", "class AppendHeroTPP(bpy.types.Operator):\n", " \"\"\"Append The Hero Character and the Rig\"\"\"\n", " bl_idname = \"ue.append_hero\"\n", " bl_label = \"Append Hero\"\n", " \n", " Custom_RIG_name = StringProperty(name=\"Custom Name\",update = RIG_Name_Callback)\n", " Include_Hero_value = BoolProperty(name=\"Include Hero Mesh?\")\n", " Include_LowRes = BoolProperty(name=\"Movile version?\")\n", " \n", " def execute (self, context):\n", " \n", " Include_hero = self.Include_Hero_value\n", " RIG_name= self.Custom_RIG_name\n", " HeroLow = self.Include_LowRes\n", " \n", " #Grab the ctive layer before the operation \n", " ActiveLayer = bpy.context.scene.layers.data.active_layer \n", " \n", " #ScriptName = bpy.data.texts['ue_tools_v1-2.py'].name\n", " #ScriptPath = bpy.data.texts['ue_tools_v1-2.py'].filepath\n", " ScriptDirectory = os.path.dirname(os.path.realpath(__file__)) #bpy.data.texts['ue_tools_v1-2.py'].filepath.strip(ScriptName)\n", " BlendFileName = \"UE4_Mannequinn_Template.blend\"\n", "\n", " TemplatePath = os.path.join(ScriptDirectory, BlendFileName, \"Object\", \"SK_MannequinMesh\")\n", " TemplatePathLow = os.path.join(ScriptDirectory, BlendFileName, \"Object\", \"SK_Mannequin_Mobile\")\n", " TemplateDirectory = os.path.join(ScriptDirectory, BlendFileName, \"Object\", \"\")\n", "\n", " RIG_Armature_name = RIG_name\n", " RIG_Mesh_name = RIG_name + \"_MESH\"\n", " \n", " if bpy.data.objects.get(RIG_Armature_name) is not None:\n", " self.report({'ERROR'}, \"Please Give an unique name to the New RIG you already have one \"+RIG_name+\" on the scene\") \n", " \n", " else:\n", " if Include_hero == True: \n", " if HeroLow == False: \n", " bpy.ops.wm.link(filepath= TemplatePath,\n", " directory= TemplateDirectory,\n", " filename=\"SK_MannequinMesh\",\n", " link=True,\n", " relative_path=True,\n", " autoselect=True,\n", " active_layer=True)\n", " \n", " bpy.ops.object.make_local(type='ALL')\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"SK_MannequinMesh\"]\n", " bpy.data.objects['SK_MannequinMesh'].select = True\n", " bpy.data.objects['SK_MannequinMesh'].name = RIG_Mesh_name\n", " bpy.ops.object.select_all(action='DESELECT')\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"HeroTPP_Character\"]\n", " bpy.data.objects['HeroTPP_Character'].select = True\n", " bpy.data.objects['HeroTPP_Character'].name = RIG_Armature_name \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " \n", " if HeroLow == True: \n", " bpy.ops.wm.link(filepath= TemplatePathLow,\n", " directory= TemplateDirectory,\n", " filename=\"SK_Mannequin_Mobile\",\n", " link=True,\n", " relative_path=True,\n", " autoselect=True,\n", " active_layer=True)\n", " \n", " bpy.ops.object.make_local(type='ALL')\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"SK_Mannequin_Mobile\"]\n", " bpy.data.objects['SK_Mannequin_Mobile'].select = True\n", " bpy.data.objects['SK_Mannequin_Mobile'].name = RIG_Mesh_name\n", " bpy.ops.object.select_all(action='DESELECT')\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"HeroTPP_Character\"]\n", " bpy.data.objects['HeroTPP_Character'].select = True\n", " bpy.ops.object.delete()\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"HeroTPP_Character_Mobile\"]\n", " bpy.data.objects['HeroTPP_Character_Mobile'].select = True\n", " bpy.data.objects['HeroTPP_Character_Mobile'].name = 
RIG_Armature_name \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " \n", " else:\n", " bpy.ops.wm.link(filepath= TemplatePath,\n", " directory= TemplateDirectory,\n", " filename=\"SK_MannequinMesh\",\n", " link=True,\n", " relative_path=True,\n", " autoselect=True,\n", " active_layer=True)\n", " \n", " bpy.ops.object.make_local(type='ALL')\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"SK_MannequinMesh\"]\n", " bpy.data.objects['SK_MannequinMesh'].select = True\n", " bpy.ops.object.delete()\n", " \n", " bpy.context.scene.objects.active = bpy.data.objects[\"HeroTPP_Character\"]\n", " bpy.data.objects['HeroTPP_Character'].select = True\n", " bpy.data.objects['HeroTPP_Character'].name = RIG_Armature_name \n", " bpy.ops.object.mode_set( mode='POSE' ) \n", " \n", " \n", " \n", " return {'FINISHED'}\n", " \n", " \n", " \n", " def invoke(self, context, event): \n", " \n", " global RIG_name, Include_hero , HeroLow \n", " self.Custom_RIG_name = RIG_name\n", " self.Include_Hero_value = Include_hero\n", " self.Include_LowRes = HeroLow \n", " \n", " return context.window_manager.invoke_props_dialog(self)\n", " \n", " \n", " \n", " return {'FINISHED'}\n", "\n", "def UE_ExportCharacter(self,context):\n", " \n", " #Get A list of objects parented to the selected armature\n", " ArmChildrenList = bpy.context.object.children\n", " \n", " \n", " BonesList = bpy.context.object.pose.bones\n", " BonesListEdit = bpy.context.object.data.edit_bones\n", " ob = bpy.context.object\n", " armature = ob.data\n", " objProps = bpy.context.object\n", " ArmatureGroups = bpy.context.active_object.pose.bone_groups\n", " \n", " \n", " \n", " #Store bones with groups for export\n", " if bpy.context.active_object.type == 'ARMATURE': \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " bpy.ops.pose.select_all(action='DESELECT')\n", "\n", " \n", " DeformBonesList=[]\n", " EpicExtraBonesList=[]\n", " \n", " if \"DeformBones\" in ArmatureGroups:\n", " pb_group = ob.pose.bone_groups['DeformBones'] # the pose bone group we wanna select\n", " for bone in BonesList:\n", " if bone.bone_group == pb_group:\n", " DeformBonesList.append(bone.name)\n", " \n", " if \"EpicExtra\" in ArmatureGroups:\n", " pbe_group = ob.pose.bone_groups['EpicExtra'] # the pose bone group we wanna select\n", " for bone in BonesList:\n", " if bone.bone_group == pbe_group:\n", " EpicExtraBonesList.append(bone.name)\n", " \n", " #Separate Bones \n", " bpy.ops.object.mode_set( mode='EDIT' )\n", " bpy.ops.armature.select_all(action='DESELECT') \n", " \n", " for bone in BonesListEdit: \n", " if bone.name in DeformBonesList:\n", " bone.use_deform = True\n", " elif bone.name in EpicExtraBonesList:\n", " bone.use_deform = True\n", " else:\n", " bone.use_deform = False\n", " \n", " bpy.ops.object.mode_set( mode='OBJECT' )\n", " \n", " \n", " #Export armature and child objects (No animation) \n", " FBX_Export_Character(self,context)\n", " \n", " #for bone in BonesListEdit:\n", " #bone.use_deform = True\n", " \n", " del DeformBonesList[:]\n", " del EpicExtraBonesList[:]\n", " \n", "\n", "\n", "#Export Character\n", "class UE_Export_Character(bpy.types.Operator):\n", " \"\"\"Export Character\"\"\"\n", " bl_idname = \"ue_export_character.button\"\n", " bl_label = \"Export Character\" \n", " \n", " \n", " \n", " def execute (self, context):\n", " \n", " \n", " if FBXRelativeFolderSelector == True: \n", " ActualPath = dirname(bpy.data.filepath)\n", " \n", " if ActualPath == \"\":\n", " self.report({'ERROR'}, \"You need Save the file for use save automatically 
into a relative folder\")\n", " else:\n", " \n", " UE_ExportCharacter(self,context)\n", " else:\n", " \n", " UE_ExportCharacter(self,context)\n", " \n", " return {'FINISHED'}\n", " \n", " \n", " \n", "#Set Deform Bones Group (for no standar skeletons)\n", "\n", "class UE_Set_Deform_Bones(bpy.types.Operator):\n", " \"\"\"Set Deform Bones for no standar skeletons\"\"\"\n", " bl_idname = \"ue_set_deform_bones.button\"\n", " bl_label = \"Set Deform Bones for no Hero RIG skeletons\" \n", " \n", " def execute (self, context):\n", " \n", " BoneList=bpy.context.object.data.bones\n", " BonesSelected = []\n", " \n", " for bone in BoneList:\n", " if bone.select==True:\n", " BonesSelected.append(bone.name)\n", " \n", " if BonesSelected != []: \n", " bpy.ops.pose.group_assign(type=0)\n", " bpy.context.object.pose.bone_groups['Group'].name = \"DeformBones\" \n", " else:\n", " self.report({'ERROR'}, \"You need select some bones\")\n", " print(\"You need select some bone\") \n", " \n", " \n", " return {'FINISHED'}\n", " \n", "#Go to pose mode for set the deform bones (for no standar skeletons)\n", "\n", "class UE_Set_POSE_mode(bpy.types.Operator):\n", " \"\"\"Set pose mode for no standar skeletons\"\"\"\n", " bl_idname = \"ue_set_podemode.button\"\n", " bl_label = \"Set Pose mode for no Hero RIG skeletons\" \n", " \n", " def execute (self, context): \n", " \n", " global Steep1 \n", " \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " Steep1=True\n", " \n", " return {'FINISHED'}\n", " \n", "#Create Automatically DeformBones Gorup\n", "\n", "class UE_AutomaticBoneGroup_button(bpy.types.Operator):\n", " \"\"\"Create the \"DeformBones\" group automatically\"\"\"\n", " bl_idname = \"ue.deformbone_create\"\n", " bl_label = \"Deform Bones\" \n", " \n", " \n", " \n", " def execute(self, context):\n", " \n", " \n", " ArmChildrenList = bpy.context.object.children\n", " BonesListEdit = bpy.context.object.data.edit_bones\n", " BoneList = bpy.context.object.pose.bones\n", " armObject = bpy.context.scene.objects.active\n", " \n", " for child in ArmChildrenList:\n", " bpy.data.objects[child.name].select = True\n", " \n", " nonZero = []\n", " for child in ArmChildrenList:\n", " if child.type == 'MESH': \n", " for vert in child.data.vertices:\n", " # Get a list of the non-zero group weightings for the vertex\n", " for g in vert.groups:\n", " g.weight = round(g.weight, 4) \n", " if g.weight > .0000:\n", " if g.group not in nonZero:\n", " nonZero.append(g.group) \n", " \n", " \n", " \n", " \n", " nonZeroNames = []\n", " BonesSelected = []\n", " \n", " for child in ArmChildrenList:\n", " if child.type == 'MESH':\n", " \n", " vertexGroups = bpy.data.objects[child.name].vertex_groups\n", " for group in vertexGroups: \n", " gName=group.name\n", " gIndex=group.index\n", " if gIndex in nonZero:\n", " if gName not in nonZeroNames:\n", " nonZeroNames.append(gName)\n", " \n", " \n", " bpy.ops.object.select_all(action='DESELECT')\n", " \n", " bpy.data.objects[armObject.name].select = True\n", " bpy.context.scene.objects.active = bpy.data.objects[armObject.name]\n", " \n", " bpy.ops.object.mode_set( mode='POSE' )\n", " \n", " #Store visible bone layers\n", " Bonelayers = bpy.context.object.data.layers\n", " VisibleBoneLayers=[]\n", " for layer in Bonelayers:\n", " if layer == True:\n", " VisibleBoneLayers.append(True)\n", " else:\n", " VisibleBoneLayers.append(False)\n", " \n", " \n", " #Enable All bone layers\n", " for n in range(0,32):\n", " bpy.context.object.data.layers[n] = True\n", "\n", " \n", " #Deselect All bones\n", " 
bpy.ops.pose.select_all(action='DESELECT')\n", " \n", " \n", " #Reselect the bones \n", " BonesSelected=[]\n", " \n", " for b in BoneList:\n", " if b.name in nonZeroNames:\n", " b.bone.select=True\n", " BonesSelected.append(b.name)\n", " \n", " \n", " #Asign The group\n", " \n", " if BonesSelected != []: \n", " bpy.ops.pose.group_assign()\n", " bpy.context.object.pose.bone_groups['Group'].name = \"DeformBones\" \n", " else:\n", " self.report({'ERROR'}, \"Any bones have vertex associated\")\n", " \n", " #Restore Visible Layers\n", " i=0\n", " for n in range(0,32):\n", " bpy.context.object.data.layers[n] = VisibleBoneLayers[i]\n", " i=i+1\n", " \n", " bpy.ops.object.mode_set( mode='OBJECT' )\n", " \n", " \n", "\n", " return {'FINISHED'} \n", "\n", "\n", "\n", "\n", "\n", " \n", "#New action Button\n", "class UE_New_Action_Button(bpy.types.Operator):\n", " \"\"\"Create a new Action\"\"\"\n", " bl_idname = \"ue.action_new_button\"\n", " bl_label = \"New Action\" \n", " \n", " Custom_ACT_name = StringProperty(name=\"Custom Action\",update = ACT_Name_Callback)\n", " \n", " def execute (self, context): \n", " \n", " ACT_name= self.Custom_ACT_name\n", " \n", " print(ACT_name) \n", " bpy.data.actions.new(ACT_name)\n", " bpy.data.actions[ACT_name].use_fake_user = True \n", " \n", " ob = bpy.context.active_object\n", " \n", " if ob.animation_data == None:\n", " bpy.context.active_object.animation_data_create()\n", " \n", " ob.animation_data.action = bpy.data.actions[ACT_name]\n", " \n", " return {'FINISHED'} \n", " \n", " \n", " def invoke(self, context, event): \n", " \n", " global ACT_name \n", " self.Custom_ACT_name = ACT_name \n", " return context.window_manager.invoke_props_dialog(self)\n", " \n", " return {'FINISHED'} \n", " \n", " \n", " \n", " \n", " \n", "# RIG Props \n", " \n", "class UE_Rig_Props(bpy.types.Operator):\n", " \"\"\"Set the value for the props on the Hero RIG\"\"\"\n", " bl_idname = \"ue_rig_props.button\"\n", " bl_label = \"Set Propeties\" \n", " \n", " \n", " RigProp = bpy.props.StringProperty() \n", " \n", " def execute (self, context):\n", " \n", " \n", " \n", " print(self.RigProp)\n", " \n", " #bpy.context.object[\"Constraints_ON_OFF\"] = 1 \n", " if bpy.context.object[self.RigProp] == 0:\n", " bpy.context.object[self.RigProp] = 1\n", " else:\n", " bpy.context.object[self.RigProp] = 0 \n", " \n", " # \"Buttos CAllbacks\" for IK True \n", " if self.RigProp == \"IKMAIN\":\n", " if bpy.context.object[\"IKMAIN\"]==1:\n", " bpy.context.object[\"IKARMS\"]=1\n", " bpy.context.object[\"IKLEGS\"]=1\n", " bpy.context.object[\"Ik hand R Lock\"]=1\n", " bpy.context.object[\"Ik Hand L Lock\"]=1\n", " bpy.context.object[\"Ik Arm R\"]=1.0\n", " bpy.context.object[\"IK Arm L\"]=1.0\n", " bpy.context.object[\"Foot Lock L\"]=1\n", " bpy.context.object[\"Foot Lock R\"]=1\n", " bpy.context.object[\"Ik Leg L\"]=1.0\n", " bpy.context.object[\"Ik Leg R\"]=1.0\n", " hideIKArmsOFF()\n", " hideIKlegOFF() \n", " \n", " \n", " if bpy.context.object[\"IKMAIN\"]==0:\n", " bpy.context.object[\"IKARMS\"]=0\n", " bpy.context.object[\"IKLEGS\"]=0\n", " bpy.context.object[\"Ik hand R Lock\"]=0\n", " bpy.context.object[\"Ik Hand L Lock\"]=0\n", " bpy.context.object[\"Ik Arm R\"]=0.0\n", " bpy.context.object[\"IK Arm L\"]=0.0\n", " bpy.context.object[\"Foot Lock L\"]=0\n", " bpy.context.object[\"Foot Lock R\"]=0\n", " bpy.context.object[\"Ik Leg L\"]=0.0\n", " bpy.context.object[\"Ik Leg R\"]=0.0\n", " hideIKArmsON()\n", " hideIKlegON() \n", " \n", " # \"Buttos CAllbacks\" for IK ARMS True \n", " if 
self.RigProp == \"IKARMS\":\n", " if bpy.context.object[\"IKARMS\"]==1: \n", " bpy.context.object[\"Ik hand R Lock\"]=1\n", " bpy.context.object[\"Ik Hand L Lock\"]=1\n", " bpy.context.object[\"Ik Arm R\"]=1.0\n", " bpy.context.object[\"IK Arm L\"]=1.0 \n", " hideIKArmsOFF()\n", " \n", " if bpy.context.object[\"IKARMS\"]==0: \n", " bpy.context.object[\"Ik hand R Lock\"]=0\n", " bpy.context.object[\"Ik Hand L Lock\"]=0 \n", " bpy.context.object[\"Ik Arm R\"]=0.0\n", " bpy.context.object[\"IK Arm L\"]=0.0\n", " hideIKArmsON()\n", " \n", " \n", " # \"Buttos CAllbacks\" for IK LEGS True \n", " if self.RigProp == \"IKLEGS\":\n", " if bpy.context.object[\"IKLEGS\"]==1: \n", " bpy.context.object[\"Foot Lock L\"]=1\n", " bpy.context.object[\"Foot Lock R\"]=1\n", " bpy.context.object[\"Ik Leg L\"]=1.0\n", " bpy.context.object[\"Ik Leg R\"]=1.0\n", " hideIKlegOFF()\n", " \n", " \n", " if bpy.context.object[\"IKLEGS\"]==0: \n", " bpy.context.object[\"Foot Lock L\"]=0\n", " bpy.context.object[\"Foot Lock R\"]=0\n", " bpy.context.object[\"Ik Leg L\"]=0.0\n", " bpy.context.object[\"Ik Leg R\"]=0.0\n", " hideIKlegON()\n", " \n", " \n", " \n", " return {'FINISHED'}\n", " \n", "#-------------------------------------------\n", "#------------------PANEL--------------------\n", "#-------------------------------------------\n", "\n", "class Mainpanel(bpy.types.Panel):\n", " \"\"\"A Custom Panel in the Viewport Toolbar\"\"\"\n", " \n", " bl_label = \"UE4 Tools\"\n", " bl_space_type = 'VIEW_3D'\n", " bl_region_type = 'TOOLS' \n", " #bl_category = 'Tools'\n", " bl_category = 'UE4 Tools'\n", " \n", " @classmethod\n", " def poll (self,context):\n", " return (bpy.context.mode == 'OBJECT' or 'POSE') \n", " \n", " \n", "\n", " def draw(self, context):\n", " \n", " scn = context.scene\n", " rs = bpy.context.scene \n", " \n", " if Init == False:\n", " layout = self.layout \n", " row = layout.row()\n", " col = row.column(align=True)\n", " col.operator ( \"ue.init_button\", text= \"Open UE4 Tools\",icon='VISIBLE_IPO_ON')\n", " \n", " if Init == True:\n", " \n", " #Main Buttons\n", " layout = self.layout \n", " row = layout.row()\n", " col = row.column(align=True)\n", " #col.operator ( \"tests.button\", text= \"Test Button\")\n", " col.separator() \n", " col.prop (scn, 'UESceneTools', text=\"Scene Tools\",icon= 'SCENE_DATA')\n", " col.prop (scn, 'UERenameTools', text=\"Rename Tools\",icon= 'GREASEPENCIL')\n", " col.prop (scn, 'UEAnimationTools', text=\"Animation Tools\",icon= 'POSE_HLT')\n", " col.prop (scn, 'UEExportTools', text=\"Export Tools\",icon= 'EXPORT')\n", " \n", " \n", " \n", " if (UE_SceneTools == True): \n", " #Scene Settings\n", " layout = self.layout \n", " \n", " box = layout.box()\n", " box.label (\"Scene Settings\",icon=\"SCENE_DATA\")\n", " \n", " row = box.row() \n", " col = row.column(align=True) \n", " col.operator(\"ue.scale_operator\", text=\"Set UE Scale\", icon='ZOOM_SELECTED')\n", " col.prop(scn, 'UEObjScale')\n", " col.separator()\n", " col.label (\"Additional Tools\",icon=\"HELP\")\n", " col.operator(\"ue.setcollpivots_button\", text=\"Set Collision Pivots\",icon='INLINK') \n", " \n", " if (UE_RenameTools == True): \n", " #Rename Settings\n", " \n", " box = layout.box()\n", " box.label (\"Batch Rename Options\",icon=\"SORTALPHA\")\n", " \n", " box2 = box.box()\n", " row = box2.row() \n", " row.label(text='Base Name:')\n", " row = box2.row() \n", " row.prop(scn,'naming_base', expand=True)\n", " \n", " col = box2.column()\n", " col.prop(scn,'rename_custom') \n", " \n", " box3 = box.box()\n", " 
col = box3.column() \n", " col.label('Datablocks to rename:')\n", " \n", " col.prop(scn, 'rename_object')\n", " col.prop(scn, 'rename_data')\n", " col.prop(scn, 'rename_material') \n", " \n", " box4 = box.box()\n", " \n", " \n", " col= box4.column()\n", " col.label (\"Prefix?\")\n", " col.prop (scn, 'IsCollision', text= \"Collider\")\n", " col.prop(scn, 'rename_use_prefix', text=\"Custom Prefix\") \n", " col.prop(scn, 'rename_prefix',text= \"Custom\")\n", " \n", " \n", " box4.label (\"Where to add?\")\n", " row = box4.row()\n", " row.prop(scn, 'prefix_object')\n", " row.prop(scn,'prefix_data')\n", " row.prop(scn, 'prefix_material')\n", " row = box.row(align = True)\n", " row.operator (\"rename.button\", text=\"Rename\", icon='GREASEPENCIL')\n", " row = layout.row()\n", " \n", " if (UE_AnimationTools == True):\n", " \n", " box = layout.box()\n", " box.label (\"Animation Tools\",icon=\"POSE_HLT\")\n", " col = box.column()\n", " #Button For append The character \n", " if context.mode == 'OBJECT': \n", " col.operator ( \"ue.append_hero\", text= \"Append Hero RIG!\",icon='VISIBLE_IPO_ON' ) \n", " \n", " #Check if I have selected object in context(prevent error if change layer)\n", " if bpy.context.selected_objects != []: \n", " # DO all this only if active object is an ARMATURE\n", " if bpy.context.active_object.type == 'CAMERA':\n", " col.operator (\"ue.export_camera\",text =\"Export Camera Animation\",icon='FORWARD')\n", " row=box.row() \n", " row.prop (scn, 'UE_Show_Export_options')\n", " \n", " if UE_ShowRigExport == True: \n", " \n", " box14=box.box()\n", " col=box14.column()\n", " col.prop (scn,'FBX_Show_Axis')\n", " col.prop (scn,'FBX_Anim_Step')\n", " col.prop (scn,'FBX_Anim_Simplify')\n", " \n", " if FBX_ShowAxis == True:\n", " col.prop (scn,'FBX_Axis_Forward')\n", " col.prop (scn,'FBX_Axis_Up') \n", " \n", " #name settings\n", " box6 = box.box()\n", " col= box6.column()\n", " row=box6.row(align=True)\n", " col.label(text='FBX Name:') \n", " row.prop(scn,'FBX_base_name', expand=True) \n", " col.prop(scn,'FBX_Export_Custom_Name',text = \"Custom Name\")\n", " \n", " #Folder settings\n", " box14 = box.box()\n", " col= box14.column()\n", " col.label (\"Export Directory:\")\n", " col.prop(scn,'FBX_Relative_Assets_Folder',text= \"Relative: UE Assets\") \n", " col.prop(scn,\"FBX_Custom_Export_Path\" ,text = \"Custom Path\") \n", " \n", " if bpy.context.active_object.type == 'ARMATURE': \n", " \n", " objProps = bpy.context.object\n", " ArmatureGroups = bpy.context.active_object.pose.bone_groups \n", " \n", " if context.mode == 'OBJECT': \n", " #Tools For bake Animations \n", " box8 = box.box() \n", " col = box8.column() \n", " \n", " col.label (\"Export Anim Tools\") \n", " \n", " if \"DeformBones\" in ArmatureGroups: \n", " col.operator (\"ue_export_character.button\",text = \"Export Character\", icon='FORWARD')\n", " col.operator ( \"ue_export_anim.button\", text= \"Export Animation\", icon='FORWARD')\n", " col.operator ( \"ue_export_all.button\" ,text = \"Export All Animations\", icon='FORWARD')\n", " \n", " row=box.row() \n", " row.prop (scn, 'UE_Show_Export_options')\n", " \n", " if UE_ShowRigExport == True: \n", " \n", " box14=box.box()\n", " col=box14.column()\n", " col.prop (scn, 'FBX_Smoothing')\n", " col.prop (scn,'FBX_Show_Axis')\n", " col.prop (scn,'FBX_Anim_Step')\n", " col.prop (scn,'FBX_Anim_Simplify')\n", " \n", " if FBX_ShowAxis == True:\n", " col.prop (scn,'FBX_Axis_Forward')\n", " col.prop (scn,'FBX_Axis_Up') \n", " \n", " #name settings\n", " box6 = box.box()\n", " col= 
box6.column()\n", " row=box6.row(align=True)\n", " col.label(text='FBX Name:') \n", " row.prop(scn,'FBX_base_name', expand=True) \n", " col.prop(scn,'FBX_Export_Custom_Name',text = \"Custom Name\")\n", " \n", " #Folder settings\n", " box14 = box.box()\n", " col= box14.column()\n", " col.label (\"Export Directory:\")\n", " col.prop(scn,'FBX_Relative_Assets_Folder',text= \"Relative: UE Assets\") \n", " col.prop(scn,\"FBX_Custom_Export_Path\" ,text = \"Custom Path\")\n", " \n", " else:\n", " col.alignment='CENTER'\n", " col.label(\"Steep 1:\",icon='INFO')\n", " col.label(\"For use this tools this\")\n", " col.label(\"armature must have a bone\")\n", " col.label(\"group called 'DeformBones'.\")\n", " col.operator( \"ue_set_podemode.button\", text= \"Manual Creation\")\n", " col.operator( \"ue.deformbone_create\", text= \"Auto Creation!\")\n", " \n", " \n", " \n", "\n", " if context.mode == 'POSE':\n", " if \"HeroRIG\" not in objProps:\n", " if Steep1 == 1:\n", " if \"DeformBones\" not in ArmatureGroups:\n", " box8 = box.box() \n", " col = box8.column()\n", " col.label(\"Steep 2:\",icon='INFO')\n", " col.label(\"Select The bones you\")\n", " col.label(\"want to Export and \")\n", " col.label(\"press the button below\") \n", " col.operator (\"ue_set_deform_bones.button\", text = \"Set Deform Bones\")\n", " \n", " if \"HeroRIG\" in objProps: \n", " row=box.row()\n", " #row.prop(scn,'UE_Show_Rig_Props',text=\"Show Rig Options\")\n", " if bpy.context.object[\"ShowRiGoptions\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"Show RIG Options\",icon='CHECKBOX_DEHLT').RigProp=\"ShowRiGoptions\" \n", " if bpy.context.object[\"ShowRiGoptions\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Show RIG Options\",icon='CHECKBOX_HLT').RigProp=\"ShowRiGoptions\" \n", " \n", " if bpy.context.object[\"ShowRiGoptions\"]==1: \n", " box9 = box.box() \n", " box9.label(\"Rig Options\")\n", " row=box9.row()\n", " \n", " if bpy.context.object[\"ShowAdvancedProps\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"Advanced Options\",icon='CHECKBOX_DEHLT').RigProp=\"ShowAdvancedProps\" \n", " if bpy.context.object[\"ShowAdvancedProps\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Advanced Options\",icon='CHECKBOX_HLT').RigProp=\"ShowAdvancedProps\" \n", " \n", " row=box9.row() \n", " if bpy.context.object.show_x_ray == True:\n", " row.prop (context.object, \"show_x_ray\",text=\"X Ray\", expand=True,icon='RESTRICT_VIEW_OFF')\n", " else:\n", " row.prop (context.object, \"show_x_ray\",text=\"X Ray\", expand=True,icon='RESTRICT_VIEW_ON')\n", " \n", " if bpy.context.object.data.show_names == True:\n", " row.prop (context.object.data, \"show_names\",text=\"Names\", expand=True,icon='RESTRICT_VIEW_OFF')\n", " else:\n", " row.prop (context.object.data, \"show_names\",text=\"Names\", expand=True,icon='RESTRICT_VIEW_ON')\n", " \n", " if bpy.context.object.data.show_axes == True:\n", " row.prop (context.object.data, \"show_axes\",text=\"Axes\", expand=True,icon='RESTRICT_VIEW_OFF')\n", " else:\n", " row.prop (context.object.data, \"show_axes\",text=\"Axes\", expand=True,icon='RESTRICT_VIEW_ON')\n", " \n", " row=box9.row()\n", " #row.prop(scn,'UE_ShowAdvanced_Rig_Prop',text= \"Show Advanced Options?\")\n", " row = box9.row(align=True)\n", " row.prop(context.active_object.data, 'layers', index=0, toggle=True, text='Deform Bones',icon='BONE_DATA')\n", " row.prop(context.active_object.data, 'layers', index=2, toggle=True, text='Helper Bones',icon='POSE_DATA')\n", " \n", " #Show Constraints Button\n", 
" row = box9.row() \n", " if bpy.context.object[\"Constraints_ON_OFF\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"Constraints ON\",icon='LINKED').RigProp=\"Constraints_ON_OFF\" \n", " if bpy.context.object[\"Constraints_ON_OFF\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Constraints OFF\",icon='UNLINKED').RigProp=\"Constraints_ON_OFF\" \n", " \n", " #Show IK Swith Button \n", " row = box9.row(align=True) \n", " #row.prop(scn,'UEAnimationIK_ONOFF',icon= 'CONSTRAINT') \n", " if bpy.context.object[\"IKMAIN\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"IK OFF\",icon='UNLINKED').RigProp=\"IKMAIN\" \n", " if bpy.context.object[\"IKMAIN\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"IK ON\",icon='LINKED').RigProp=\"IKMAIN\" \n", " \n", " #Show IK arms and legs Buttons \n", " if bpy.context.object[\"IKMAIN\"]==1:\n", " \n", " row = box9.row()\n", " #Show Buttons for arms\n", " if bpy.context.object[\"IKARMS\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"IK Arms\",icon='RESTRICT_VIEW_ON').RigProp=\"IKARMS\" \n", " \n", " if bpy.context.object[\"IKARMS\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"IK Arms\",icon='RESTRICT_VIEW_OFF').RigProp=\"IKARMS\"\n", " #Show buttons for legs \n", " if bpy.context.object[\"IKLEGS\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"IK Legs\",icon='RESTRICT_VIEW_ON').RigProp=\"IKLEGS\" \n", " if bpy.context.object[\"IKLEGS\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"IK Legs\",icon='RESTRICT_VIEW_OFF').RigProp=\"IKLEGS\" \n", " \n", " \n", " #SHow IK value Bars\n", " if bpy.context.object[\"IKARMS\"]==1: \n", " row = box9.row()\n", " row.prop (context.object, '[\"Ik Arm R\"]', slider = True)\n", " row = box9.row()\n", " row.prop (context.object, '[\"IK Arm L\"]', slider = True)\n", " \n", " if bpy.context.object[\"IKLEGS\"]==1:\n", " row = box9.row()\n", " row.prop (context.object, '[\"Ik Leg R\"]', slider = True)\n", " row = box9.row()\n", " row.prop (context.object, '[\"Ik Leg L\"]', slider = True)\n", " \n", " \n", " #Show Ik Loks\n", " if bpy.context.object[\"IKARMS\"]==1: \n", " #Show button for Lock R hand \n", " row = box9.row() \n", " if bpy.context.object[\"Ik hand R Lock\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"Hand R\",icon='UNLOCKED').RigProp=\"Ik hand R Lock\" \n", " if bpy.context.object[\"Ik hand R Lock\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Hand R\",icon='LOCKED').RigProp=\"Ik hand R Lock\" \n", " \n", " #Show button for Lock L hand\n", " if bpy.context.object[\"Ik Hand L Lock\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"Hand L\",icon='UNLOCKED').RigProp=\"Ik Hand L Lock\" \n", " if bpy.context.object[\"Ik Hand L Lock\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Hand L\",icon='LOCKED').RigProp=\"Ik Hand L Lock\" \n", " \n", " #Show Slider for animate hand lock\n", " if bpy.context.object[\"ShowAdvancedProps\"]==1:\n", " row = box9.row()\n", " row.prop (context.object, '[\"Ik hand R Lock\"]',text=\"Hand R\", slider = True)\n", " row.prop (context.object, '[\"Ik Hand L Lock\"]',text=\"Hand L\", slider = True)\n", " \n", " if bpy.context.object[\"IKLEGS\"]==1:\n", " #Show button for Lock R Foot\n", " row = box9.row() \n", " if bpy.context.object[\"Foot Lock R\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"Foot R\",icon='UNLOCKED').RigProp=\"Foot Lock R\" \n", " if bpy.context.object[\"Foot Lock R\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Foot 
R\",icon='LOCKED').RigProp=\"Foot Lock R\" \n", " \n", " #Show button for Lock L Foot\n", " if bpy.context.object[\"Foot Lock L\"]==0:\n", " row.operator(\"ue_rig_props.button\", text=\"Foot R\",icon='UNLOCKED').RigProp=\"Foot Lock L\" \n", " if bpy.context.object[\"Foot Lock L\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Foot R\",icon='LOCKED').RigProp=\"Foot Lock L\" \n", " \n", " #Show Slider for animate hand lock\n", " if bpy.context.object[\"ShowAdvancedProps\"]==1:\n", " row = box9.row()\n", " row.prop (context.object, '[\"Foot Lock R\"]',text=\"Hand R\", slider = True)\n", " row.prop (context.object, '[\"Foot Lock L\"]',text=\"Hand L\", slider = True) \n", " \n", " row=box9.row()\n", " row.label(\"Inherit Rotation:\",icon='GROUP_BONE')\n", " \n", " row=box9.row() \n", " if bpy.context.object[\"Head inherit Rotation\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"Head\",icon='CHECKBOX_DEHLT',emboss=False).RigProp=\"Head inherit Rotation\" \n", " if bpy.context.object[\"Head inherit Rotation\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Head\",icon='CHECKBOX_HLT',emboss=False).RigProp=\"Head inherit Rotation\"\n", " \n", " \n", " if bpy.context.object[\"Arms inherit Rotation\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"Arms\",icon='CHECKBOX_DEHLT',emboss=False).RigProp=\"Arms inherit Rotation\" \n", " if bpy.context.object[\"Arms inherit Rotation\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Arms\",icon='CHECKBOX_HLT',emboss=False).RigProp=\"Arms inherit Rotation\" \n", " \n", " \n", " if bpy.context.object[\"Waist Inherit Rotation\"]==0: \n", " row.operator(\"ue_rig_props.button\", text=\"Waist\",icon='CHECKBOX_DEHLT',emboss=False).RigProp=\"Waist Inherit Rotation\" \n", " if bpy.context.object[\"Waist Inherit Rotation\"]==1:\n", " row.operator(\"ue_rig_props.button\", text=\"Waist\",icon='CHECKBOX_HLT', emboss=False).RigProp=\"Waist Inherit Rotation\" \n", " \n", " row=box9.row() \n", " if bpy.context.object[\"ShowAdvancedProps\"]==1:\n", " row.prop (context.object, '[\"Head inherit Rotation\"]',text=\"Head inherit rotation\", slider = True) \n", " row.prop (context.object, '[\"Arms inherit Rotation\"]',text=\"Arms inherit rotation\", slider = True) \n", " row.prop (context.object, '[\"Waist Inherit Rotation\"]',text=\"Wais inherit rotation\", slider = True)\n", " \n", " \n", " \n", " \n", " #DIsplay The Faked Actions stored on Data\n", " actions = bpy.data.actions\n", " \n", " box = layout.box()\n", " box.label(\"Available Actions\", icon='ACTION')\n", " row=box.row()\n", " row=box.row(align=True)\n", " row.alignment = 'RIGHT'\n", " row.operator (\"ue.action_new_button\",icon='ZOOMIN')\n", " box12=box.box() \n", " row=box12.row()\n", " col = row.column()\n", " \n", " for action in actions: \n", " if action.use_fake_user == True: \n", " col.operator(\"ue.action_change\", text=action.name,).act=action.name \n", " col = row.column()\n", " \n", " for action in actions: \n", " if action.use_fake_user == True: \n", " col.operator(\"ue.action_delete\",icon='X', text=\"\").actdel=action.name \n", " \n", " if (UE_ExportTools == True):\n", " \n", " #FBX Export Settings\n", " layout = self.layout \n", " \n", " box = layout.box()\n", " box.label (\"Export Tools\",icon=\"EXPORT\")\n", " \n", " #General settings\n", " box5 = box.box()\n", " col = box5.column()\n", " row = box5.row()\n", " #row.prop (scn, 'FBX_AssetTypeSelector', expand=True)\n", " \n", " row.label (\"FBX Settings:\")\n", " col = box5.column()\n", " col.prop (scn, 
'FBX_Pivot_to_Center')\n", " col.prop (scn,'FBX_Export_Collision_Obj',text= \"Export collision\")\n", " \n", " col.prop (scn,'FBX_Show_Axis')\n", " if FBX_ShowAxis == True:\n", " col.prop (scn,'FBX_Axis_Forward')\n", " col.prop (scn,'FBX_Axis_Up')\n", " col.prop (scn, 'FBX_Smoothing')\n", " \n", " #name settings\n", " box6 = box.box()\n", " row = box6.row()\n", " row.label(text='FBX Name:')\n", " row = box6.row()\n", " row.prop(scn,'FBX_base_name', expand=True)\n", " col = box6.column()\n", " col.prop(scn,'FBX_Export_Custom_Name',text = \"Custom Name\") \n", " \n", " #Folder settings\n", " box7 = box.box()\n", " col = box7.column()\n", " col.label (\"Export Directory:\")\n", " col.prop(scn,'FBX_Relative_Assets_Folder',text= \"Relative: UE4 Assets\") \n", " col.prop(scn,\"FBX_Custom_Export_Path\" ,text = \"Custom Path\") \n", " \n", " col = box.column()\n", " col.operator ( \"ue.export_fbx_button\", text= \"FBX Export\",icon='FORWARD') \n", "\n", "\n", "\n", " \n", " \n", "#-------------------------------------------\n", "#-----------------REGISTER------------------\n", "#-------------------------------------------\n", "\n", "classes = [\n", " SetCollisionPivots_Button,\n", " FBX_ExportButton,\n", " InitUEToolsButton, \n", " Rename_Button,\n", " UE_Export_Character,\n", " UEScaleOperator,\n", " AppendHeroTPP,\n", " Mainpanel,\n", " Export_IK_animation,\n", " ExportAllAnims,\n", " Action_buttons,\n", " UE_Set_Deform_Bones,\n", " UE_Set_POSE_mode,\n", " UE_Rig_Props,\n", " UE_New_Action_Button,\n", " Delete_Action_buttons,\n", " UE_AutomaticBoneGroup_button,\n", " UEExportCamera \n", " ] \n", " \n", " \n", "def register(): \n", " \n", " for c in classes: \n", " bpy.utils.register_class(c) \n", "\n", "def unregister(): \n", " \n", " for c in classes:\n", " bpy.utils.unregister_class(c)\n", "\n", "if __name__ == \"__main__\":\n", " register()\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0.18181818181818182, 0.03571428571428571, 0.041666666666666664, 0.023809523809523808, 0.016129032258064516, 0.1, 0.03333333333333333, 0.023809523809523808, 0.034482758620689655, 0.02857142857142857, 0.041666666666666664, 0.025, 0.04, 0, 0.08333333333333333, 0, 0.02127659574468085, 0.2, 0, 0, 0, 0, 0, 0.2, 0.0392156862745098, 0, 0, 0, 0.044444444444444446, 0.022222222222222223, 0.022222222222222223, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0, 0, 0, 0, 0, 0.05263157894736842, 0, 0, 0, 0, 0.041666666666666664, 0.05555555555555555, 0.05263157894736842, 0.05263157894736842, 0, 0.1, 0.125, 0.058823529411764705, 0, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0.047619047619047616, 0.043478260869565216, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0.03571428571428571, 0.05555555555555555, 0.07692307692307693, 0, 0, 0.058823529411764705, 0, 0.047619047619047616, 0, 0, 0, 0.041666666666666664, 0, 0, 0.037037037037037035, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0.043478260869565216, 0, 0, 0, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0.047619047619047616, 0.061224489795918366, 0.2, 0, 0, 0.2, 0.2, 0.045454545454545456, 0.06, 0.2, 0, 0, 0.2, 0.2, 0.045454545454545456, 0.06, 0.2, 0, 0, 0.2, 0, 0.04, 0.05660377358490566, 0.2, 0, 0, 0, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0.034482758620689655, 0.09523809523809523, 0, 0.06666666666666667, 0, 0.06976744186046512, 0.04, 0, 0.2, 0.09523809523809523, 0, 0.06666666666666667, 0, 0.06818181818181818, 0.0425531914893617, 0, 0, 0.05555555555555555, 0, 0.06666666666666667, 0, 0.06818181818181818, 0.0425531914893617, 0, 0.2, 0.047619047619047616, 0, 0.06666666666666667, 0, 0.06382978723404255, 0.04, 0, 0, 0, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0, 0.043478260869565216, 0.043478260869565216, 0.043478260869565216, 0, 0.06976744186046512, 0.2, 0, 0.029411764705882353, 0.047619047619047616, 0, 0, 0.043478260869565216, 0.043478260869565216, 0.043478260869565216, 0.043478260869565216, 0.16666666666666666, 0.041666666666666664, 0.07352941176470588, 0.0625, 0.029411764705882353, 0.08333333333333333, 0, 0, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0, 0, 0, 0, 0.044444444444444446, 0.022727272727272728, 0.022222222222222223, 0, 0.05, 0.07017543859649122, 0.2, 0, 0.021739130434782608, 0.03278688524590164, 0.1111111111111111, 0, 0.030303030303030304, 0.044444444444444446, 0.125, 0.034482758620689655, 0.023809523809523808, 0.018867924528301886, 0, 0.05714285714285714, 0.07142857142857142, 0.125, 0.038461538461538464, 0.027777777777777776, 0.02127659574468085, 0, 0.045454545454545456, 0.06382978723404255, 0.125, 0.03333333333333333, 0.022222222222222223, 0.017543859649122806, 0, 0.03333333333333333, 0, 0.06521739130434782, 0.125, 0.034482758620689655, 0.021739130434782608, 0.0392156862745098, 0, 0.03125, 0.06666666666666667, 0.125, 0.041666666666666664, 0, 0.03508771929824561, 0, 0.034482758620689655, 0.07142857142857142, 0.125, 0.04, 0.05405405405405406, 0.03636363636363636, 0.2, 0.029411764705882353, 0.06382978723404255, 0.125, 0.041666666666666664, 0.02564102564102564, 0.017241379310344827, 0.2, 0, 0, 0.08695652173913043, 0.075, 0.125, 0.041666666666666664, 0.02857142857142857, 0.017857142857142856, 0, 0, 0, 0.044444444444444446, 0.022222222222222223, 0.022222222222222223, 0, 
0.03571428571428571, 0.2, 0.05555555555555555, 0, 0, 0, 0, 0, 0, 0.2, 0.058823529411764705, 0, 0.057692307692307696, 0.06896551724137931, 0.07142857142857142, 0.04, 0, 0, 0.2, 0.058823529411764705, 0, 0.06666666666666667, 0, 0, 0, 0.2, 0.05, 0.2, 0.1111111111111111, 0, 0.06060606060606061, 0, 0.06976744186046512, 0.05, 0, 0.2, 0.08695652173913043, 0, 0.06666666666666667, 0, 0.075, 0.0425531914893617, 0, 0.2, 0.1, 0, 0.05714285714285714, 0, 0.06666666666666667, 0.038461538461538464, 0, 0.2, 0.2, 0.09523809523809523, 0, 0.06896551724137931, 0, 0.06818181818181818, 0.03225806451612903, 0, 0.2, 0.05263157894736842, 0, 0.08, 0, 0, 0, 0.2, 0.041666666666666664, 0, 0.08, 0, 0.06976744186046512, 0.043478260869565216, 0, 0.2, 0.047619047619047616, 0, 0.08695652173913043, 0, 0.075, 0.046511627906976744, 0, 0.2, 0.038461538461538464, 0, 0.07407407407407407, 0, 0.06666666666666667, 0.0425531914893617, 0, 0.2, 0.05263157894736842, 0, 0.06451612903225806, 0, 0.07894736842105263, 0.03225806451612903, 0, 0.2, 0, 0.16666666666666666, 0, 0, 0, 0, 0.044444444444444446, 0.022222222222222223, 0.022222222222222223, 0, 0.05263157894736842, 0.041666666666666664, 0.034482758620689655, 0, 0.2, 0.05263157894736842, 0.041666666666666664, 0.043478260869565216, 0, 0.2, 0, 0.05, 0.02702702702702703, 0, 0, 0, 0, 0.1111111111111111, 0, 0, 0.2, 0.1, 0.02631578947368421, 0.2, 0, 0, 0, 0, 0.2, 0, 0.2, 0, 0.2, 0.2, 0.1111111111111111, 0.05263157894736842, 0.1111111111111111, 0.06060606060606061, 0, 0, 0.1111111111111111, 0, 0, 0.020833333333333332, 0, 0, 0.07692307692307693, 0.018518518518518517, 0.014084507042253521, 0, 0, 0, 0, 0.1111111111111111, 0.019230769230769232, 0, 0.07692307692307693, 0.03333333333333333, 0, 0, 0.07692307692307693, 0.02857142857142857, 0.044444444444444446, 0.125, 0.125, 0.06666666666666667, 0.029411764705882353, 0.02564102564102564, 0.057692307692307696, 0.045454545454545456, 0.03571428571428571, 0, 0, 0.02564102564102564, 0.058823529411764705, 0.05555555555555555, 0.06557377049180328, 0.058823529411764705, 0.05263157894736842, 0.03125, 0.05263157894736842, 0.058823529411764705, 0.058823529411764705, 0.06976744186046512, 0.03225806451612903, 0, 0.03773584905660377, 0.023255813953488372, 0.04878048780487805, 0.03773584905660377, 0, 0, 0.047619047619047616, 0.023255813953488372, 0.05128205128205128, 0.05, 0.05714285714285714, 0.043478260869565216, 0.03333333333333333, 0.1111111111111111, 0.1111111111111111, 0.06818181818181818, 0.027777777777777776, 0, 0, 0, 0, 0.01818181818181818, 0.038461538461538464, 0.0273972602739726, 0.02857142857142857, 0.01639344262295082, 0, 0.058823529411764705, 0.1111111111111111, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0, 0.045454545454545456, 0.1111111111111111, 0, 0, 0.02127659574468085, 0.2, 0, 0.0425531914893617, 0.1111111111111111, 0, 0, 0, 0.14285714285714285, 0.07142857142857142, 0.1111111111111111, 0, 0, 0, 0, 0.05660377358490566, 0.2, 0, 0, 0.029411764705882353, 0.2, 0.05660377358490566, 0.1111111111111111, 0, 0.020833333333333332, 0.03636363636363636, 0.2, 0.06557377049180328, 0.2, 0, 0.015151515151515152, 0.016666666666666666, 0.2, 0.2, 0.05357142857142857, 0.2, 0, 0, 0.018518518518518517, 0.2, 0.2, 0.058823529411764705, 0.2, 0, 0, 0.021739130434782608, 0.2, 0.2, 0.06382978723404255, 0.2, 0, 0, 0.020833333333333332, 0.2, 0.09523809523809523, 0.2, 0, 0, 0.023809523809523808, 0, 0.08, 0.2, 0, 0, 0.02040816326530612, 0.2, 0.08163265306122448, 0.2, 0, 0, 0.02127659574468085, 0.2, 0.07692307692307693, 0.2, 0, 0, 0.0196078431372549, 0.2, 
0.0851063829787234, 0.2, 0, 0, 0.021739130434782608, 0.2, 0.07547169811320754, 0.2, 0, 0, 0.0196078431372549, 0.2, 0.07407407407407407, 0.2, 0, 0, 0.037037037037037035, 0.2, 0, 0.2, 0, 0.044444444444444446, 0.022222222222222223, 0.022222222222222223, 0.03125, 0.2, 0.038461538461538464, 0.2, 0.047619047619047616, 0, 0, 0, 0.044444444444444446, 0, 0.2, 0.06666666666666667, 0, 0.05454545454545454, 0.05, 0.08, 0.04, 0, 0.030303030303030304, 0, 0.047619047619047616, 0, 0.058823529411764705, 0, 0.0625, 0.023255813953488372, 0.025, 0.2, 0.2, 0.1111111111111111, 0, 0, 0, 0, 0.2, 0.058823529411764705, 0, 0.05357142857142857, 0.05, 0.07142857142857142, 0.04, 0, 0, 0.2, 0.0625, 0, 0.07142857142857142, 0, 0.07142857142857142, 0.03508771929824561, 0, 0.2, 0.2, 0.2, 0.08, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.05263157894736842, 0, 0.05, 0.07692307692307693, 0.08333333333333333, 0.045454545454545456, 0, 0, 0.2, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.07692307692307693, 0, 0.05454545454545454, 0.09523809523809523, 0.08695652173913043, 0.05128205128205128, 0, 0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.11764705882352941, 0, 0.058823529411764705, 0.06666666666666667, 0, 0, 0.2, 0.025, 0, 0.05714285714285714, 0, 0.05454545454545454, 0.028169014084507043, 0.02040816326530612, 0.2, 0.2, 0.11764705882352941, 0, 0.05555555555555555, 0.047619047619047616, 0, 0.06666666666666667, 0, 0.2, 0, 0, 0.2, 0.058823529411764705, 0, 0.046511627906976744, 0, 0.061224489795918366, 0.0273972602739726, 0.02127659574468085, 0.2, 0, 0.2, 0.08, 0, 0.046511627906976744, 0, 0.06666666666666667, 0.018404907975460124, 0, 0.2, 0.047619047619047616, 0, 0.07407407407407407, 0, 0.07317073170731707, 0.03508771929824561, 0, 0.2, 0.06666666666666667, 0, 0.07692307692307693, 0, 0.08333333333333333, 0.022900763358778626, 0, 0.2, 0.043478260869565216, 0, 0.058823529411764705, 0, 0.06818181818181818, 0.02727272727272727, 0, 0.2, 0.2, 0.06451612903225806, 0, 0.0625, 0, 0, 0, 0.024390243902439025, 0.03333333333333333, 0.2, 0.1, 0, 0.07407407407407407, 0, 0, 0, 0.022727272727272728, 0, 0.2, 0, 0.2, 0.05555555555555555, 0.3333333333333333, 0.2, 0, 0.044444444444444446, 0.022222222222222223, 0.04081632653061224, 0, 0, 0.043478260869565216, 0.02702702702702703, 0.017241379310344827, 0.2, 0.038461538461538464, 0.027777777777777776, 0, 0.2, 0, 0.043478260869565216, 0.2, 0.02564102564102564, 0.0625, 0, 0, 0.1111111111111111, 0, 0.025, 0.03333333333333333, 0, 0.1111111111111111, 0.1111111111111111, 0.05128205128205128, 0, 0, 0, 0.009174311926605505, 0.058823529411764705, 0.02631578947368421, 0.028985507246376812, 0.01694915254237288, 0.1111111111111111, 0.07692307692307693, 0.16666666666666666, 0.05084745762711865, 0.2, 0, 0.02, 0, 0.018867924528301886, 0.2, 0.012658227848101266, 0, 0.025, 0, 0, 0.015873015873015872, 0, 0, 0.2, 0.2, 0, 0, 0.0967741935483871, 0.2, 0, 0, 0.2, 0.03225806451612903, 0.1111111111111111, 0, 0.013888888888888888, 0.1111111111111111, 0.13333333333333333, 0.2, 0.029411764705882353, 0.1111111111111111, 0.024390243902439025, 0.058823529411764705, 0.02857142857142857, 0.1111111111111111, 0, 0, 0.034482758620689655, 0.022222222222222223, 0, 0.06060606060606061, 0.1111111111111111, 0.09090909090909091, 0.016129032258064516, 0.014285714285714285, 0.014084507042253521, 0.043478260869565216, 0.023809523809523808, 0, 0, 0, 0.023255813953488372, 0, 0.1111111111111111, 0.06666666666666667, 0.2, 0.2, 0.04477611940298507, 0.05660377358490566, 0, 0.018518518518518517, 0.058823529411764705, 0.0625, 0, 0.04225352112676056, 0, 0, 0.02, 0, 0, 0, 0, 0.018867924528301886, 0, 
0.015384615384615385, 0.019230769230769232, 0, 0.012345679012345678, 0, 0.019230769230769232, 0.020833333333333332, 0, 0.0196078431372549, 0.125, 0.029850746268656716, 0, 0.045454545454545456, 0, 0.025, 0.1111111111111111, 0, 0.2, 0.2, 0.2, 0.10714285714285714, 0, 0, 0, 0.044444444444444446, 0.02040816326530612, 0.022222222222222223, 0, 0.0625, 0, 0, 0.05555555555555555, 0.14285714285714285, 0.045454545454545456, 0.018867924528301886, 0.05714285714285714, 0, 0.06521739130434782, 0.037037037037037035, 0.02702702702702703, 0.2, 0.034482758620689655, 0.017241379310344827, 0.05263157894736842, 0, 0.06, 0.033707865168539325, 0, 0, 0.16666666666666666, 0.25, 0.058823529411764705, 0, 0.09302325581395349, 0.2, 0, 0.021739130434782608, 0.2, 0.07692307692307693, 0.2, 0, 0.022222222222222223, 0.2, 0.07142857142857142, 0.2, 0, 0.037037037037037035, 0.2, 0.2, 0.06976744186046512, 0.2, 0, 0.021739130434782608, 0.2, 0, 0.022222222222222223, 0.020833333333333332, 0.022222222222222223, 0, 0, 0, 0.06818181818181818, 0.2, 0.03225806451612903, 0.1111111111111111, 0, 0.013888888888888888, 0.1111111111111111, 0.13333333333333333, 0.1, 0, 0, 0.07692307692307693, 0.024390243902439025, 0, 0.2, 0.02, 0.07142857142857142, 0.1111111111111111, 0.18181818181818182, 0.016129032258064516, 0, 0.02531645569620253, 0, 0.043478260869565216, 0.023809523809523808, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0.06666666666666667, 0.0625, 0, 0.04225352112676056, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0.020833333333333332, 0, 0.0196078431372549, 0.1111111111111111, 0.2, 0.10714285714285714, 0, 0, 0, 0.061224489795918366, 0.2, 0.03225806451612903, 0.1111111111111111, 0, 0.013888888888888888, 0.1111111111111111, 0.07142857142857142, 0.2, 0.028985507246376812, 0.02, 0.2, 0.1, 0.025, 0.01639344262295082, 0.07692307692307693, 0.024390243902439025, 0.03508771929824561, 0.07692307692307693, 0.1111111111111111, 0.18181818181818182, 0.016129032258064516, 0, 0.013888888888888888, 0.03125, 0.014705882352941176, 0.043478260869565216, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0.2, 0.058823529411764705, 0.125, 0, 0.04225352112676056, 0, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.016666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0.020833333333333332, 0, 0.0196078431372549, 0.2, 0.07142857142857142, 0, 0, 0, 0.06, 0.2, 0.03225806451612903, 0.1111111111111111, 0, 0.013888888888888888, 0.1111111111111111, 0.07142857142857142, 0.2, 0.02857142857142857, 0.02, 0.2, 0.1, 0.025, 0.03225806451612903, 0.07692307692307693, 0.024390243902439025, 0.03389830508474576, 0.07692307692307693, 0.1111111111111111, 0.18181818181818182, 0.016129032258064516, 0, 0.02666666666666667, 0.014705882352941176, 0.043478260869565216, 0.023809523809523808, 0, 0, 0, 0, 0, 0.023255813953488372, 0, 0.2, 0.058823529411764705, 0.15, 0.2, 0, 0.04225352112676056, 0, 0, 0.02, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0.01639344262295082, 0, 0, 0, 0, 0, 0, 0.019230769230769232, 0.020833333333333332, 0, 0.019230769230769232, 0.1111111111111111, 0.2, 0.12, 0.14285714285714285, 0, 0, 0.05128205128205128, 0.2, 0.01639344262295082, 0, 0.07692307692307693, 0.07692307692307693, 0.021739130434782608, 0, 0.027777777777777776, 0, 0, 0.06666666666666667, 0.017857142857142856, 0.015384615384615385, 0.2, 0.037037037037037035, 0.2, 0.1111111111111111, 0.038461538461538464, 0, 0.07142857142857142, 0.05128205128205128, 0.029850746268656716, 0.0425531914893617, 0, 0, 0.06666666666666667, 
0.07407407407407407, 0.03333333333333333, 0.1111111111111111, 0, 0.041237113402061855, 0.027777777777777776, 0.02040816326530612, 0.01818181818181818, 0.045454545454545456, 0, 0.021052631578947368, 0, 0.04, 0.017241379310344827, 0.14285714285714285, 0.07692307692307693, 0.0425531914893617, 0.017543859649122806, 0.1111111111111111, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0.04081632653061224, 0.05555555555555555, 0.05128205128205128, 0.020833333333333332, 0.1111111111111111, 0.027777777777777776, 0.05, 0, 0.125, 0.1111111111111111, 0.03225806451612903, 0, 0.2, 0.2, 0.045454545454545456, 0.2, 0, 0, 0.09090909090909091, 0.25, 0.05128205128205128, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.05, 0.2, 0, 0, 0.09090909090909091, 0.3333333333333333, 0.05128205128205128, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.05, 0.2, 0, 0, 0.2, 0.25, 0.05128205128205128, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.07692307692307693, 0, 0.2, 0, 0, 0.2, 0.25, 0.05128205128205128, 0, 0, 0, 0, 0, 0, 0.07692307692307693, 0.07692307692307693, 0.022222222222222223, 0.022222222222222223, 0.022222222222222223, 0, 0.04, 0, 0, 0, 0, 0, 0.2, 0.030303030303030304, 0.1111111111111111, 0, 0, 0.013888888888888888, 0.015151515151515152, 0.0136986301369863, 0.014285714285714285, 0, 0.029411764705882353, 0.1111111111111111, 0.04081632653061224, 0.1111111111111111, 0, 0, 0.013888888888888888, 0.015151515151515152, 0.0136986301369863, 0.014285714285714285, 0, 0.1111111111111111, 0, 0.2, 0.2, 0.047619047619047616, 0, 0, 0, 0, 0, 0.2, 0.030303030303030304, 0.1111111111111111, 0.034482758620689655, 0.1111111111111111, 0.07142857142857142, 0.045454545454545456, 0.1111111111111111, 0.08333333333333333, 0.1111111111111111, 0.07317073170731707, 0, 0, 0.1111111111111111, 0, 0, 0.1111111111111111, 0.03571428571428571, 0.029411764705882353, 0.1111111111111111, 0.04878048780487805, 0.034482758620689655, 0.07692307692307693, 0, 0, 0, 0.011494252873563218, 0.013333333333333334, 0.025, 0, 0.031914893617021274, 0.06666666666666667, 0, 0, 0.06060606060606061, 0.018867924528301886, 0, 0, 0.023809523809523808, 0.07692307692307693, 0.030303030303030304, 0.1111111111111111, 0.045454545454545456, 0.1111111111111111, 0.1111111111111111, 0.07692307692307693, 0.02631578947368421, 0.1111111111111111, 0, 0.01282051282051282, 0.013888888888888888, 0, 0.015625, 0, 0.014492753623188406, 0.1111111111111111, 0, 0.07692307692307693, 0, 0.07692307692307693, 0.04, 0.07692307692307693, 0, 0.016666666666666666, 0.047619047619047616, 0, 0, 0, 0.03773584905660377, 0, 0, 0, 0.030303030303030304, 0.07692307692307693, 0.014705882352941176, 0.012658227848101266, 0.1111111111111111, 0.03225806451612903, 0.5, 0.5, 0.06666666666666667, 0, 0, 0, 0, 0.029411764705882353, 0.16666666666666666, 0.2, 0.058823529411764705, 0, 0.020833333333333332, 0.2, 0.2, 0.06060606060606061, 0.1111111111111111, 0, 0.2, 0, 0.015873015873015872, 0.05263157894736842, 0.021739130434782608, 0.06666666666666667, 0.1111111111111111, 0.058823529411764705, 0.2, 0, 0.11764705882352941, 0, 0, 0, 0.03125, 0.1111111111111111, 0.05, 0.1111111111111111, 0, 0, 0, 0, 0, 0.1111111111111111, 0.1111111111111111, 0.05, 0.03571428571428571, 0.1111111111111111, 0.03225806451612903, 0, 0.05, 0, 0.05263157894736842, 0.2, 0.038461538461538464, 0.07692307692307693, 0, 0.125, 0.1111111111111111, 0.06896551724137931, 0.1111111111111111, 0, 0.1111111111111111, 0.045454545454545456, 0.1111111111111111, 0, 0.2, 0, 0.022727272727272728, 0, 0.01639344262295082, 0.2, 0, 0.1111111111111111, 0.018518518518518517, 
0.1111111111111111, 0, 0.1111111111111111, 0.04, 0.1111111111111111, 0, 0.017857142857142856, 0.03571428571428571, 0.041666666666666664, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0.058823529411764705, 0.027777777777777776, 0, 0, 0, 0.07692307692307693, 0.022222222222222223, 0, 0.022727272727272728, 0.05084745762711865, 0.07692307692307693, 0.07692307692307693, 0.05714285714285714, 0, 0, 0.05555555555555555, 0.03571428571428571, 0.05555555555555555, 0.02702702702702703, 0.07692307692307693, 0.02631578947368421, 0.03125, 0, 0.011904761904761904, 0.030303030303030304, 0, 0.07692307692307693, 0.022222222222222223, 0, 0.022727272727272728, 0.017241379310344827, 0, 0.03529411764705882, 0.07692307692307693, 0.05, 0.045454545454545456, 0.016666666666666666, 0.014285714285714285, 0.1111111111111111, 0.1111111111111111, 0.07692307692307693, 0.1111111111111111, 0.02857142857142857, 0.030303030303030304, 0.02857142857142857, 0, 0, 0, 0.13043478260869565, 0.022727272727272728, 0, 0, 0.029411764705882353, 0.2, 0.14285714285714285, 0.07692307692307693, 0.08333333333333333, 0.1111111111111111, 0.02040816326530612, 0.1111111111111111, 0.034482758620689655, 0, 0.008695652173913044, 0, 0.02040816326530612, 0, 0.038461538461538464, 0.1, 0.07692307692307693, 0.07692307692307693, 0, 0, 0, 0, 0, 0.125, 0.023809523809523808, 0, 0, 0.034482758620689655, 0.1111111111111111, 0.025, 0.2, 0.030303030303030304, 0.041666666666666664, 0.03225806451612903, 0.125, 0.011764705882352941, 0.125, 0.03225806451612903, 0, 0.08333333333333333, 0.02040816326530612, 0, 0, 0.027777777777777776, 0.1111111111111111, 0.023255813953488372, 0.2, 0.025, 0.1111111111111111, 0.014084507042253521, 0, 0.25, 0, 0.1111111111111111, 0, 0, 0, 0.043478260869565216, 0.023255813953488372, 0.02857142857142857, 0, 0.03571428571428571, 0.05, 0.125, 0.03571428571428571, 0.2, 0.045454545454545456, 0.017857142857142856, 0.2, 0.05555555555555555, 0.03508771929824561, 0.1111111111111111, 0.09090909090909091, 0.05454545454545454, 0.16666666666666666, 0, 0.14285714285714285, 0, 0.02857142857142857, 0, 0, 0, 0.012987012987012988, 0.1111111111111111, 0.125, 0, 0.13043478260869565, 0.02127659574468085, 0.010869565217391304, 0, 0.02857142857142857, 0.2, 0.3333333333333333, 0.07692307692307693, 0.06060606060606061, 0.1111111111111111, 0.03125, 0.125, 0.03508771929824561, 0, 0.06666666666666667, 0, 0.008695652173913044, 0, 0.016666666666666666, 0.01818181818181818, 0.058823529411764705, 0, 0.017857142857142856, 0.03636363636363636, 0.1111111111111111, 0.1, 0.03571428571428571, 0.2, 0.3333333333333333, 0.03125, 0, 0.024390243902439025, 0.2, 0.03125, 0.017857142857142856, 0.02127659574468085, 0.2, 0, 0.2, 0.07142857142857142, 0.2, 0.03225806451612903, 0.024390243902439025, 0, 0.021739130434782608, 0.047619047619047616, 0, 0.0392156862745098, 0, 0, 0, 0.03773584905660377, 0, 0.25, 0.03389830508474576, 0.2, 0.05263157894736842, 0.03225806451612903, 0, 0.022222222222222223, 0, 0, 0, 0, 0, 0.025, 0.5, 0.2, 0.2, 0.06060606060606061, 0.1111111111111111, 0.14285714285714285, 0.04, 0.043478260869565216, 0.041666666666666664, 0, 0.06666666666666667, 0, 0.008695652173913044, 0, 0.0625, 0.03333333333333333, 0, 0.08333333333333333, 0.03333333333333333, 0.05, 0, 0.2, 0, 0, 0.1, 0, 0.024390243902439025, 0, 0, 0, 0.2, 0.047619047619047616, 0, 0, 0.14285714285714285, 0.030303030303030304, 0.1111111111111111, 0, 0.02564102564102564, 0, 0.058823529411764705, 0.034482758620689655, 0.0136986301369863, 0.1111111111111111, 0.016129032258064516, 0.015151515151515152, 0.022556390977443608, 0, 
0, 0.01020408163265306, 0.009615384615384616, 0.011494252873563218, 0, 0, 0, 0.1111111111111111, 0, 0.015503875968992248, 0.07692307692307693, 0, 0.03278688524590164, 0.022727272727272728, 0.016666666666666666, 0.029411764705882353, 0.014925373134328358, 0.02040816326530612, 0.017241379310344827, 0.01818181818181818, 0.017543859649122806, 0.02127659574468085, 0, 0.047619047619047616, 0.010869565217391304, 0, 0, 0, 0.047619047619047616, 0.010752688172043012, 0, 0.023809523809523808, 0.03389830508474576, 0.05, 0.03773584905660377, 0.015873015873015872, 0.029411764705882353, 0.014285714285714285, 0.02040816326530612, 0.017241379310344827, 0.01818181818181818, 0.017543859649122806, 0.02127659574468085, 0, 0.047619047619047616, 0.010526315789473684, 0, 0.012345679012345678, 0, 0.047619047619047616, 0.010752688172043012, 0, 0, 0.047619047619047616, 0.01, 0, 0.02197802197802198, 0.03389830508474576, 0.03125, 0, 0.017857142857142856, 0.029411764705882353, 0.014925373134328358, 0.02040816326530612, 0.017241379310344827, 0.01818181818181818, 0.017543859649122806, 0.02564102564102564, 0, 0.02564102564102564, 0.011363636363636364, 0, 0, 0.058823529411764705, 0.011235955056179775, 0, 0.0125, 0.05172413793103448, 0.08333333333333333, 0.08333333333333333, 0.125, 0.03571428571428571, 0.14285714285714285, 0.125, 0.1111111111111111, 0.043478260869565216, 0.125, 0.037037037037037035, 0, 0, 0.02564102564102564, 0.038461538461538464, 0, 0.14285714285714285, 0.125, 0.1, 0.03571428571428571, 0, 0.05263157894736842, 0.16666666666666666, 0.04838709677419355, 0, 0.2, 0.2, 0.021739130434782608, 0, 0, 0, 0, 0, 0.2, 0.3333333333333333, 0.2, 0.05, 0.029850746268656716, 0.0425531914893617, 0, 0, 0.06666666666666667, 0.07407407407407407, 0.03333333333333333, 0.1111111111111111, 0, 0.041237113402061855, 0.027777777777777776, 0.02040816326530612, 0.01818181818181818, 0.045454545454545456, 0, 0.021052631578947368, 0, 0.04, 0.017241379310344827, 0.14285714285714285, 0.07692307692307693, 0.0425531914893617, 0.017543859649122806, 0.1111111111111111, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0.04081632653061224, 0.1111111111111111, 0.1111111111111111, 0.045454545454545456, 0.023255813953488372, 0.09090909090909091, 0.027777777777777776, 0.05555555555555555, 0.07692307692307693, 0, 0, 0.2, 0, 0, 0.1111111111111111, 0.02127659574468085, 0, 0, 0.02857142857142857, 0.2, 0.3333333333333333, 0.07692307692307693, 0.06060606060606061, 0.1111111111111111, 0.125, 0.05263157894736842, 0.03773584905660377, 0.06666666666666667, 0.029411764705882353, 0.017241379310344827, 0.05263157894736842, 0.0625, 0.04, 0, 0.08333333333333333, 0.022222222222222223, 0.07692307692307693, 0, 0.3333333333333333, 0.16666666666666666, 0.16666666666666666, 0.0392156862745098, 0, 0.02127659574468085, 0, 0, 0.015625, 0.07692307692307693, 0.030303030303030304, 0.1111111111111111, 0.0625, 0.03571428571428571, 0.1, 0.03225806451612903, 0.08571428571428572, 0.02040816326530612, 0.058823529411764705, 0.075, 0.02127659574468085, 0.025, 0.06666666666666667, 0.015151515151515152, 0.03773584905660377, 0.1111111111111111, 0.1111111111111111, 0.06896551724137931, 0.16666666666666666, 0.014492753623188406, 0, 0.022727272727272728, 0, 0, 0.01639344262295082, 0.07692307692307693, 0.058823529411764705, 0.1111111111111111, 0.1, 0.1111111111111111, 0.0625, 0.09523809523809523, 0.1111111111111111, 0.034482758620689655, 0.16666666666666666, 0.025, 0, 0.017857142857142856, 0, 0, 0.029411764705882353, 0.1111111111111111, 0.2, 0.2, 0.03125, 0.1111111111111111, 
0.05555555555555555, 0.05454545454545454, 0.016666666666666666, 0.02, 0.018518518518518517, 0.1, 0.02564102564102564, 0.017857142857142856, 0.1, 0.045454545454545456, 0.02564102564102564, 0.04081632653061224, 0.02, 0.012345679012345678, 0, 0.039473684210526314, 0.021739130434782608, 0.03508771929824561, 0.03389830508474576, 0.03225806451612903, 0.016129032258064516, 0.1111111111111111, 0.1111111111111111, 0.07407407407407407, 0.03571428571428571, 0.1, 0.02564102564102564, 0.02631578947368421, 0.058823529411764705, 0.013333333333333334, 0.019417475728155338, 0.05263157894736842, 0.05, 0.023255813953488372, 0.01818181818181818, 0, 0.043478260869565216, 0.1, 0.037037037037037035, 0.1111111111111111, 0.017857142857142856, 0.012987012987012988, 0.1, 0.0625, 0.1, 0.05555555555555555, 0.018867924528301886, 0.06666666666666667, 0.029411764705882353, 0.06451612903225806, 0.020833333333333332, 0.05263157894736842, 0.02040816326530612, 0.1, 0.1, 0.09090909090909091, 0.06451612903225806, 0, 0, 0.1, 0.10344827586206896, 0.019230769230769232, 0.1, 0.1, 0.125, 0.07692307692307693, 0.1, 0.03571428571428571, 0.025, 0.05555555555555555, 0.021739130434782608, 0.047619047619047616, 0.05555555555555555, 0.11538461538461539, 0.058823529411764705, 0.075, 0.024390243902439025, 0.025, 0.06666666666666667, 0.013888888888888888, 0.07142857142857142, 0.06060606060606061, 0.15384615384615385, 0.06451612903225806, 0.014285714285714285, 0.10526315789473684, 0.1, 0.06, 0.08333333333333333, 0.1111111111111111, 0, 0.09375, 0, 0, 0, 0, 0, 0.2, 0.10526315789473684, 0.020833333333333332, 0, 0, 0.03125, 0.1111111111111111, 0.046511627906976744, 0.09090909090909091, 0.05, 0.1111111111111111, 0.02564102564102564, 0.1111111111111111, 0.03125, 0, 0.017543859649122806, 0.1111111111111111, 0.025, 0.1111111111111111, 0.02631578947368421, 0, 0.02702702702702703, 0, 0.14285714285714285, 0.03225806451612903, 0.1111111111111111, 0.1, 0.043478260869565216, 0.125, 0.037037037037037035, 0.017857142857142856, 0, 0.1111111111111111, 0.034482758620689655, 0.2, 0.2, 0.2, 0.2, 0.125, 0.1, 0.2, 0.025, 0, 0, 0.03125, 0.2, 0.2, 0.044444444444444446, 0.07692307692307693, 0.030303030303030304, 0.1111111111111111, 0.1111111111111111, 0.1111111111111111, 0.03571428571428571, 0.058823529411764705, 0.03225806451612903, 0, 0, 0, 0.01639344262295082, 0.1111111111111111, 0.021739130434782608, 0, 0.020833333333333332, 0.02127659574468085, 0.02127659574468085, 0.01818181818181818, 0.01818181818181818, 0.0196078431372549, 0.0196078431372549, 0.019230769230769232, 0.019230769230769232, 0.0196078431372549, 0.0196078431372549, 0, 0.03125, 0.05555555555555555, 0.07692307692307693, 0.061224489795918366, 0.02127659574468085, 0.02127659574468085, 0.01818181818181818, 0.01818181818181818, 0.0196078431372549, 0.0196078431372549, 0.019230769230769232, 0.019230769230769232, 0.0196078431372549, 0.0196078431372549, 0, 0.03225806451612903, 0.1111111111111111, 0.017857142857142856, 0, 0.04, 0.01818181818181818, 0.01818181818181818, 0.0196078431372549, 0.029850746268656716, 0, 0.04, 0.04, 0.01818181818181818, 0.03571428571428571, 0.0196078431372549, 0.0196078431372549, 0, 0.058823529411764705, 0.1111111111111111, 0.05263157894736842, 0, 0.04, 0.019230769230769232, 0.019230769230769232, 0.0196078431372549, 0.0196078431372549, 0, 0.058823529411764705, 0.06666666666666667, 0.06, 0.019230769230769232, 0.019230769230769232, 0.0196078431372549, 0.0196078431372549, 0, 0.047619047619047616, 0.07142857142857142, 0.07692307692307693, 0.03571428571428571, 0.25, 0.022222222222222223, 
0.022222222222222223, 0.022222222222222223, 0, 0.029411764705882353, 0, 0.2, 0, 0, 0.02702702702702703, 0.037037037037037035, 0, 0.2, 0, 0.06896551724137931, 0.017543859649122806, 0.2, 0.1111111111111111, 0, 0.034482758620689655, 0.1111111111111111, 0, 0.022727272727272728, 0.1111111111111111, 0.038461538461538464, 0.024390243902439025, 0, 0, 0.054945054945054944, 0.07692307692307693, 0.04, 0.07692307692307693, 0.038461538461538464, 0.024390243902439025, 0, 0, 0.015384615384615385, 0.029411764705882353, 0.04878048780487805, 0.046511627906976744, 0.045454545454545456, 0.0375, 0.07692307692307693, 0.1111111111111111, 0.1111111111111111, 0.046875, 0.03125, 0.023809523809523808, 0.07692307692307693, 0, 0.031746031746031744, 0.03225806451612903, 0.025, 0.019230769230769232, 0.010752688172043012, 0, 0, 0.03389830508474576, 0.028037383177570093, 0.1111111111111111, 0.041666666666666664, 0.030303030303030304, 0.1111111111111111, 0, 0.029411764705882353, 0.1111111111111111, 0, 0.029411764705882353, 0, 0.029411764705882353, 0.017543859649122806, 0.1111111111111111, 0, 0.037037037037037035, 0.1111111111111111, 0, 0.024390243902439025, 0, 0.1111111111111111, 0, 0, 0.017543859649122806, 0.1111111111111111, 0, 0.1111111111111111, 0.1111111111111111, 0.05714285714285714, 0.02631578947368421, 0.03125, 0.013333333333333334, 0.03225806451612903, 0.07142857142857142, 0.1111111111111111, 0.044444444444444446, 0, 0, 0.022727272727272728, 0, 0.045454545454545456, 0.024096385542168676, 0, 0.07692307692307693, 0.022727272727272728, 0.047619047619047616, 0, 0.03225806451612903, 0, 0.04, 0.02, 0.061946902654867256, 0.058823529411764705, 0.02197802197802198, 0.012658227848101266, 0, 0.014705882352941176, 0.047619047619047616, 0.02857142857142857, 0.015384615384615385, 0.03125, 0.022727272727272728, 0.030303030303030304, 0.022727272727272728, 0.02127659574468085, 0.03389830508474576, 0.03389830508474576, 0.031746031746031744, 0.034482758620689655, 0.018867924528301886, 0.030303030303030304, 0.046875, 0.03571428571428571, 0.023255813953488372, 0, 0.02127659574468085, 0.018867924528301886, 0.017543859649122806, 0.02127659574468085, 0.056818181818181816, 0.017857142857142856, 0.022222222222222223, 0, 0.020833333333333332, 0.016666666666666666, 0.05, 0.07692307692307693, 0.027777777777777776, 0.022988505747126436, 0.04, 0, 0.019417475728155338, 0.041666666666666664, 0.011627906976744186, 0.03225806451612903, 0.01639344262295082, 0.02040816326530612, 0.01818181818181818, 0.0273972602739726, 0.025, 0.015384615384615385, 0.04201680672268908, 0.03508771929824561, 0.058823529411764705, 0.030303030303030304, 0.02564102564102564, 0.0136986301369863, 0.030303030303030304, 0.020833333333333332, 0.030303030303030304, 0.019230769230769232, 0.01818181818181818, 0.014705882352941176, 0.029850746268656716, 0.029850746268656716, 0.028169014084507043, 0.02702702702702703, 0.01639344262295082, 0.02702702702702703, 0.041666666666666664, 0.027777777777777776, 0.0196078431372549, 0, 0.01818181818181818, 0.01639344262295082, 0.015384615384615385, 0.0196078431372549, 0.052083333333333336, 0.015625, 0.018867924528301886, 0, 0.017857142857142856, 0.014705882352941176, 0.046296296296296294, 0.061855670103092786, 0.03125, 0, 0.01818181818181818, 0.015151515151515152, 0, 0, 0, 0.030927835051546393, 0.031914893617021274, 0.030303030303030304, 0.030303030303030304, 0.030303030303030304, 0, 0.0196078431372549, 0, 0, 0, 0.0136986301369863, 0, 0.013513513513513514, 0, 0, 0.009615384615384616, 0.036036036036036036, 0.02702702702702703, 
0.011627906976744186, 0.021739130434782608, 0.02197802197802198, 0.025974025974025976, 0.029411764705882353, 0.013157894736842105, 0.033783783783783786, 0.030303030303030304, 0.02531645569620253, 0.017857142857142856, 0, 0.0196078431372549, 0.02702702702702703, 0.03571428571428571, 0.023121387283236993, 0.024096385542168676, 0.026490066225165563, 0.02702702702702703, 0.022988505747126436, 0.01282051282051282, 0.030534351145038167, 0, 0.03076923076923077, 0.025, 0.024096385542168676, 0.029411764705882353, 0, 0.02962962962962963, 0.024390243902439025, 0.024390243902439025, 0.029850746268656716, 0, 0.03007518796992481, 0.027777777777777776, 0.0196078431372549, 0.01834862385321101, 0, 0.013986013986013986, 0.013986013986013986, 0.02702702702702703, 0.01639344262295082, 0.015873015873015872, 0.023809523809523808, 0.024390243902439025, 0.023809523809523808, 0.02564102564102564, 0.02702702702702703, 0.023529411764705882, 0.012048192771084338, 0.026785714285714284, 0.013888888888888888, 0.0273972602739726, 0.013888888888888888, 0.031496062992125984, 0.02702702702702703, 0.025, 0.013888888888888888, 0.014084507042253521, 0, 0.015873015873015872, 0.013157894736842105, 0.02877697841726619, 0.019230769230769232, 0.013157894736842105, 0.021739130434782608, 0.029850746268656716, 0.02, 0.022988505747126436, 0.013157894736842105, 0.02877697841726619, 0.02702702702702703, 0.02702702702702703, 0.03571428571428571, 0.01904761904761905, 0.03333333333333333, 0.05, 0.016666666666666666, 0.05, 0.02702702702702703, 0.013888888888888888, 0.03333333333333333, 0.05, 0.016666666666666666, 0.05, 0.02, 0.02702702702702703, 0.04, 0.017857142857142856, 0.02857142857142857, 0.009345794392523364, 0.023809523809523808, 0.02531645569620253, 0.023809523809523808, 0.02962962962962963, 0.020833333333333332, 0.014492753623188406, 0.023809523809523808, 0.02531645569620253, 0.023809523809523808, 0.02962962962962963, 0.024390243902439025, 0.013333333333333334, 0.022988505747126436, 0, 0.04132231404958678, 0.04132231404958678, 0.024390243902439025, 0.013888888888888888, 0.04285714285714286, 0.012195121951219513, 0.024691358024691357, 0.025806451612903226, 0.024691358024691357, 0.030303030303030304, 0.02040816326530612, 0.014492753623188406, 0.024691358024691357, 0.025806451612903226, 0.024691358024691357, 0.030303030303030304, 0.022222222222222223, 0.013333333333333334, 0.022988505747126436, 0, 0.0423728813559322, 0.05042016806722689, 0.02702702702702703, 0.0196078431372549, 0.023529411764705882, 0.02702702702702703, 0.028169014084507043, 0.03409090909090909, 0.028089887640449437, 0.022988505747126436, 0.025974025974025976, 0.02702702702702703, 0.02702702702702703, 0.045454545454545456, 0.028089887640449437, 0.022988505747126436, 0.03225806451612903, 0.02702702702702703, 0.024390243902439025, 0.0449438202247191, 0.027777777777777776, 0.022727272727272728, 0.02531645569620253, 0.023809523809523808, 0.038461538461538464, 0.024096385542168676, 0.0425531914893617, 0.0425531914893617, 0.03571428571428571, 0.02702702702702703, 0.02702702702702703, 0.25, 0.034482758620689655, 0.030303030303030304, 0, 0.04, 0, 0, 0.02631578947368421, 0.020833333333333332, 0, 0.02631578947368421, 0.03125, 0.025, 0, 0.034482758620689655, 0.020833333333333332, 0.017699115044247787, 0.026785714285714284, 0, 0.02, 0.020833333333333332, 0.021505376344086023, 0.038461538461538464, 0.038461538461538464, 0.024390243902439025, 0.041666666666666664, 0.02702702702702703, 0.023809523809523808, 0.07692307692307693, 0, 0.03508771929824561, 0.058823529411764705, 
0.029411764705882353, 0, 0, 0, 0.014285714285714285, 0.058823529411764705, 0.022727272727272728, 0, 0.018518518518518517, 0.060240963855421686, 0.058823529411764705, 0.0425531914893617, 0.024390243902439025, 0.037037037037037035, 0.04081632653061224, 0.020833333333333332, 0.058823529411764705, 0.03225806451612903, 0, 0, 0, 0, 0.01694915254237288, 0, 0.054945054945054944, 0.058823529411764705, 0.030303030303030304, 0, 0, 0.020833333333333332, 0.056179775280898875, 0.0759493670886076, 0.058823529411764705, 0, 0.06521739130434782, 0, 0, 0, 0.058823529411764705, 0.038461538461538464, 0.044444444444444446, 0.022222222222222223, 0.022222222222222223, 0, 0.08333333333333333, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0.09090909090909091, 0.1111111111111111, 0.1111111111111111, 0.05, 0.2, 0.03333333333333333, 0.02631578947368421, 0, 0.1, 0.2, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ]
2,810
0.046265
""" Copyright (c) 2011, 2012, Regents of the University of California All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ @author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu> """ import operator import re from twisted.internet import task from twisted.python import log from smap.driver import SmapDriver from smap.util import periodicSequentialCall, str_path from smap.contrib import dtutil import OpenOPC # properties not to add as metadata PROP_FILTER_LIST = [u"Item Timestamp", u"Item Value"] def exclude(key): for exclude in PROP_FILTER_LIST: if key.find(exclude) >= 0: return True return False class Driver(SmapDriver): def setup(self, opts): self.opc_name = opts.get('OpcName') self.opc_host = opts.get('OpcHost', '127.0.0.1') self.unit_tag = opts.get('OpcUnitProperty', 'Engineering Units') self.points = {opts.get('OpcPoint', '*'): {}} self.opc_timefmt = opts.get("OpcTimeFormat", "%m/%d/%y %H:%M:%S") self.opc_timezone = opts.get("OpcTimezone", "Local") self.rate = int(opts.get("Rate", 30)) if 'OpcPointFile' in opts: with open(opts.get('OpcPointFile'), 'r') as fp: self.points = self.parse_pointfile(fp) else: self.points = None def start(self): self.connect() def stop(self): self.updater.stop() self.opc.close() def make_path(self, point): return point.replace("\\","/") def parse_pointfile(self, fp): pointdfns = {} cur_tag = None while True: line = fp.readline() if not line: break line = re.sub("#(.*)$", "", line.rstrip()) if not re.match("^[ ]+", line): pointdfns[line] = {} cur_tag = line elif cur_tag: pieces = line.lstrip().split(" ") pointdfns[cur_tag][pieces[0]] = ' '.join(pieces[1:]) return pointdfns def parse_pointlist(self, points): pointdfns = {} cur_tag = None for line in points: line = re.sub("#(.*)$", "", line.rstrip()) if not re.match("^[ ]+", line): pointdfns[line] = {} cur_tag = line elif cur_tag: pieces = line.lstrip().split(" ") pointdfns[cur_tag][pieces[0]] = ' '.join(pieces[1:]) return pointdfns def connect(self): print "attempting OPC connection to", self.opc_name self.opc = OpenOPC.open_client(host=self.opc_host) self.opc.connect(self.opc_name, self.opc_host) if self.points is None: pointlist = self.opc.list(recursive=True, flat=True) self.points = self.parse_pointlist(pointlist) props = self.opc.properties(self.points.keys()) print "loaded", len(props), "properties" points = {} for 
point, pid, key, val in props: key = key.decode().encode('ascii','ignore') key = key.replace(' ','') if isinstance(val, unicode) or isinstance(val, str): val = val.encode('ascii','ignore') name = self.make_path(point) if not name in points: points[name] = self.points[point] if not exclude(key): points[name]['OpcDA/' + key] = str(val) # try to make some sense out of the metadata for name, meta in points.iteritems(): unit = str(meta.get('OpcDA/' + self.unit_tag, 'None')) dtype = meta.get('OpcDA/ItemCanonicalDataType', None) if not dtype: print "no datatype tag in", name continue dtype = 'double' if not self.get_timeseries(name): name = name.decode().encode('ascii','ignore') self.add_timeseries(name, unit, data_type=dtype) self.set_metadata(name, points[name]) vals = self.opc.read(self.points.keys(), group="smap-points-group") self.updater = task.LoopingCall(self.update).start(self.rate) def _update(self): vals = self.opc.read(group="smap-points-group") for point, value, quality, time in vals: # parse the timestamp in the timezone of the server if time is not None: ts = dtutil.strptime_tz(time, self.opc_timefmt, self.opc_timezone) ts = dtutil.dt2ts(ts) else: ts = dtutil.now(self.opc_timezone) ts = dtutil.dt2ts(ts) if self.get_timeseries(self.make_path(point)) and value is not None: if isinstance(value, bool): value = int(value) self._add(self.make_path(point), ts, float(value)) def update(self): try: if not hasattr(self, 'opc'): self.connect() else: self._update() except: log.err() # try to clean up and reconnect on an error try: self.opc.remove(self.opc.groups()) except: pass try: self.opc.close() except: pass del self.opc
[ "\"\"\"\r\n", "Copyright (c) 2011, 2012, Regents of the University of California\r\n", "All rights reserved.\r\n", "\r\n", "Redistribution and use in source and binary forms, with or without\r\n", "modification, are permitted provided that the following conditions\r\n", "are met:\r\n", "\r\n", " - Redistributions of source code must retain the above copyright\r\n", " notice, this list of conditions and the following disclaimer.\r\n", " - Redistributions in binary form must reproduce the above copyright\r\n", " notice, this list of conditions and the following disclaimer in the\r\n", " documentation and/or other materials provided with the\r\n", " distribution.\r\n", "\r\n", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n", "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\r\n", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL\r\n", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\r\n", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r\n", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r\n", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\r\n", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r\n", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\r\n", "OF THE POSSIBILITY OF SUCH DAMAGE.\r\n", "\"\"\"\r\n", "\"\"\"\r\n", "@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>\r\n", "\"\"\"\r\n", "\r\n", "import operator\r\n", "import re\r\n", "from twisted.internet import task\r\n", "from twisted.python import log\r\n", "\r\n", "from smap.driver import SmapDriver\r\n", "from smap.util import periodicSequentialCall, str_path\r\n", "from smap.contrib import dtutil\r\n", "\r\n", "import OpenOPC\r\n", "\r\n", "# properties not to add as metadata\r\n", "PROP_FILTER_LIST = [u\"Item Timestamp\",\r\n", " u\"Item Value\"]\r\n", "\r\n", "\r\n", "def exclude(key):\r\n", " for exclude in PROP_FILTER_LIST:\r\n", " if key.find(exclude) >= 0: return True\r\n", " return False\r\n", "\r\n", "class Driver(SmapDriver):\r\n", " def setup(self, opts):\r\n", " self.opc_name = opts.get('OpcName')\r\n", " self.opc_host = opts.get('OpcHost', '127.0.0.1')\r\n", " self.unit_tag = opts.get('OpcUnitProperty', 'Engineering Units')\r\n", " self.points = {opts.get('OpcPoint', '*'): {}}\r\n", " self.opc_timefmt = opts.get(\"OpcTimeFormat\", \"%m/%d/%y %H:%M:%S\")\r\n", " self.opc_timezone = opts.get(\"OpcTimezone\", \"Local\")\r\n", "\r\n", " self.rate = int(opts.get(\"Rate\", 30))\r\n", " if 'OpcPointFile' in opts:\r\n", " with open(opts.get('OpcPointFile'), 'r') as fp:\r\n", " self.points = self.parse_pointfile(fp)\r\n", " else:\r\n", " self.points = None\r\n", "\r\n", " def start(self):\r\n", " self.connect()\r\n", "\r\n", " def stop(self):\r\n", " self.updater.stop()\r\n", " self.opc.close()\r\n", "\r\n", " def make_path(self, point):\r\n", "\treturn point.replace(\"\\\\\",\"/\")\r\n", "\r\n", " def parse_pointfile(self, fp):\r\n", " pointdfns = {}\r\n", " cur_tag = None\r\n", " while True:\r\n", " line = fp.readline()\r\n", " if not line: break\r\n", " line = re.sub(\"#(.*)$\", \"\", line.rstrip())\r\n", " if not re.match(\"^[ ]+\", line):\r\n", " pointdfns[line] = {}\r\n", " cur_tag = line\r\n", " elif cur_tag:\r\n", " pieces = line.lstrip().split(\" \")\r\n", " pointdfns[cur_tag][pieces[0]] = ' 
'.join(pieces[1:])\r\n", " return pointdfns\r\n", "\r\n", " def parse_pointlist(self, points):\r\n", " pointdfns = {}\r\n", " cur_tag = None\r\n", " for line in points:\r\n", " line = re.sub(\"#(.*)$\", \"\", line.rstrip())\r\n", " if not re.match(\"^[ ]+\", line):\r\n", " pointdfns[line] = {}\r\n", " cur_tag = line\r\n", " elif cur_tag:\r\n", " pieces = line.lstrip().split(\" \")\r\n", " pointdfns[cur_tag][pieces[0]] = ' '.join(pieces[1:])\r\n", " return pointdfns\r\n", "\r\n", "\r\n", " def connect(self):\r\n", " print \"attempting OPC connection to\", self.opc_name\r\n", " self.opc = OpenOPC.open_client(host=self.opc_host)\r\n", " self.opc.connect(self.opc_name, self.opc_host)\r\n", " if self.points is None:\r\n", " pointlist = self.opc.list(recursive=True, flat=True)\r\n", " self.points = self.parse_pointlist(pointlist)\r\n", " props = self.opc.properties(self.points.keys())\r\n", " print \"loaded\", len(props), \"properties\"\r\n", " points = {}\r\n", " for point, pid, key, val in props:\r\n", " key = key.decode().encode('ascii','ignore')\r\n", " key = key.replace(' ','')\r\n", " if isinstance(val, unicode) or isinstance(val, str):\r\n", " val = val.encode('ascii','ignore')\r\n", " name = self.make_path(point)\r\n", " if not name in points:\r\n", " points[name] = self.points[point]\r\n", " if not exclude(key):\r\n", " points[name]['OpcDA/' + key] = str(val)\r\n", "\r\n", " # try to make some sense out of the metadata\r\n", " for name, meta in points.iteritems():\r\n", " unit = str(meta.get('OpcDA/' + self.unit_tag, 'None'))\r\n", " dtype = meta.get('OpcDA/ItemCanonicalDataType', None)\r\n", " if not dtype:\r\n", " print \"no datatype tag in\", name\r\n", " continue\r\n", " dtype = 'double'\r\n", " if not self.get_timeseries(name):\r\n", " name = name.decode().encode('ascii','ignore')\r\n", " self.add_timeseries(name, unit, data_type=dtype)\r\n", " self.set_metadata(name, points[name])\r\n", " vals = self.opc.read(self.points.keys(), group=\"smap-points-group\")\r\n", " self.updater = task.LoopingCall(self.update).start(self.rate)\r\n", "\r\n", " def _update(self):\r\n", " vals = self.opc.read(group=\"smap-points-group\")\r\n", " for point, value, quality, time in vals:\r\n", " # parse the timestamp in the timezone of the server\r\n", " if time is not None:\r\n", " ts = dtutil.strptime_tz(time, self.opc_timefmt, self.opc_timezone)\r\n", " ts = dtutil.dt2ts(ts)\r\n", " else:\r\n", " ts = dtutil.now(self.opc_timezone)\r\n", " ts = dtutil.dt2ts(ts)\r\n", " if self.get_timeseries(self.make_path(point)) and value is not None:\r\n", " if isinstance(value, bool): value = int(value)\r\n", " self._add(self.make_path(point), ts, float(value))\r\n", "\r\n", " def update(self):\r\n", " try:\r\n", " if not hasattr(self, 'opc'):\r\n", " self.connect()\r\n", " else:\r\n", " self._update()\r\n", " except:\r\n", " log.err()\r\n", "\r\n", " # try to clean up and reconnect on an error\r\n", " try:\r\n", " self.opc.remove(self.opc.groups())\r\n", " except:\r\n", " pass\r\n", "\r\n", " try:\r\n", " self.opc.close()\r\n", " except:\r\n", " pass\r\n", " del self.opc\r\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0.09090909090909091, 0.02857142857142857, 0.03125, 0, 0.027777777777777776, 0.017857142857142856, 0.030303030303030304, 0, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0.0425531914893617, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09090909090909091, 0, 0.027777777777777776, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806, 0.02564102564102564, 0, 0.019230769230769232, 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0.012195121951219513, 0.015625, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0.047619047619047616, 0, 0, 0, 0, 0.047619047619047616, 0, 0 ]
178
0.005163
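Each record closes with two summary values that appear to be derived from the per-line score list preceding them: the integer looks like the count of entries in the line list, and the final float looks like the arithmetic mean of those per-line scores (the record above reports 178 lines and an average of 0.005163, which is consistent with the scores shown). A minimal sketch of that relationship, assuming a record is a plain dict with hypothetical keys "lines" and "line_scores" and that the average is an unweighted mean:

# Hedged sketch: "lines" and "line_scores" are assumed key names; the
# unweighted-mean relationship is inferred from the values shown above.
def summarize_record(record):
    lines = record["lines"]              # source file split into lines
    line_scores = record["line_scores"]  # one score per line
    line_count = len(lines)
    mean_score = sum(line_scores) / len(line_scores) if line_scores else 0.0
    return line_count, mean_score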
[ "import os\n", "import urllib, urllib2\n", "import re\n", "import socket\n", "import xbmc\n", "import xbmcplugin\n", "import xbmcgui\n", "import xbmcaddon\n", "from operator import itemgetter\n", "import XbmcHelpers\n", "common = XbmcHelpers\n", "\n", "import resources.lib.search as search\n", "\n", "import resources.lib.hdgo as hdgo\n", "import resources.lib.videoframe as videoframe\n", "import resources.lib.hdbaza as hdbaza\n", "\n", "from videohosts import moonwalk\n", "from videohosts import kodik\n", "from videohosts import iframe\n", "from videohosts import videocdn\n", "from videohosts import hdvb\n", "from videohosts import collaps\n", "\n", "socket.setdefaulttimeout(120)\n", "\n", "HANDLE = int(sys.argv[1])\n", "ID = 'context.dandy.kinopoisk.sc'\n", "ADDON = xbmcaddon.Addon(ID)\n", "PATH = ADDON.getAddonInfo('path')\n", "\n", "EXT_SEARCH = ADDON.getSetting('ext_search') if ADDON.getSetting('ext_search') else \"false\"\n", "\n", "PARAMS = None\n", "MODE = None\n", "\n", "def get_media_title(kp_id, media_title):\n", " if not kp_id:\n", " return media_title\n", "\n", " if media_title:\n", " media_title_ = media_title\n", " else:\n", " media_title_ = \"<\" + kp_id + \">\"\n", "\n", " response = common.fetchPage({\"link\": \"https://www.kinopoisk.ru/film/\" + kp_id + \"/\"})\n", "\n", " if response[\"status\"] == 200:\n", " content = response[\"content\"]\n", " try:\n", " div = common.parseDOM(content, \"div\", attrs={\"id\": \"headerFilm\"})[0]\n", " media_title_ = strip_(encode_('utf-8', common.parseDOM(div, \"h1\")[0]))\n", " except:\n", " pass\n", " return replace_(media_title_)\n", "\n", "\n", "def get_media_image(kp_id):\n", " return \"https://st.kp.yandex.net/images/film_big/\" + kp_id + \".jpg\"\n", "\n", "def search_kp_id(media_title, mode):\n", " media = []\n", " media_titles = []\n", "\n", " response = common.fetchPage({\"link\": \"http://www.kinopoisk.ru/index.php?first=no&what=&kp_query=\" + urllib.quote_plus(media_title)})\n", "\n", " if response[\"status\"] == 200:\n", " content = response[\"content\"]\n", "\n", " try:\n", " div = common.parseDOM(content, \"div\", attrs={\"class\": \"search_results\"})[0]\n", " info = common.parseDOM(div, \"div\", attrs={\"class\": \"info\"})[0]\n", " title = encode_('utf-8', decode_('cp1251', common.parseDOM(info, \"a\")[0]))\n", " media.append(common.parseDOM(info, \"a\", ret=\"data-id\")[0]) \n", " media_titles.append(replace_(title + \" (\" + common.parseDOM(info, \"span\")[0] + \")\"))\n", " if (EXT_SEARCH == \"true\") and (mode != \"search\"):\n", " divmain = common.parseDOM(content, \"div\", attrs={\"class\": \"search_results search_results_last\"})[0]\n", " divs = common.parseDOM(divmain, \"div\", attrs={\"class\": \"element\"})\n", " for div in divs:\n", " info = common.parseDOM(div, \"div\", attrs={\"class\": \"info\"})[0]\n", " title = encode_('utf-8', decode_('cp1251', common.parseDOM(info, \"a\")[0]))\n", " if media_title.decode('utf-8').upper() == title.decode('utf-8').upper(): \n", " media.append(common.parseDOM(info, \"a\", ret=\"data-id\")[0])\n", " media_titles.append(replace_(title + \" (\" + common.parseDOM(info, \"span\")[0] + \")\"))\n", " except:\n", " pass\n", "\n", " ret = 0\n", " if len(media) > 0:\n", " if len(media) > 1:\n", " ret = xbmcgui.Dialog().select(\"Select media\", media_titles)\n", " if ret >= 0:\n", " return media[ret]\n", " else:\n", " return None\n", " else:\n", " return None\n", "\n", "def get_user_input_id():\n", " dialog = xbmcgui.Dialog()\n", " kp_id = None\n", " result = dialog.input('Input 
Kinopoisk ID', '', type = xbmcgui.INPUT_NUMERIC)\n", " if result:\n", " kp_id = result\n", " return kp_id\n", "\n", "def get_user_input_title():\n", " dialog = xbmcgui.Dialog()\n", " title = None\n", " result = dialog.input('Input Title', '')\n", " if result:\n", " title = result\n", " return title\n", "\n", "def get_user_input():\n", " variants = [\"Search by ID\", \"Search by Title\"]\n", " dialog = xbmcgui.Dialog()\n", " index_ = dialog.select(\"Select search type\", variants)\n", " if (index_ == 0):\n", " return get_user_input_id()\n", " elif (index_ == 1):\n", " title = get_user_input_title()\n", " if title:\n", " return search_kp_id(title, None)\n", " else: \n", " return None \n", "\n", "def get_kp_id(media_title, mode):\n", " if media_title:\n", " return search_kp_id(media_title, mode)\n", " else:\n", " return get_user_input()\n", "\n", "def get_engine(data):\n", " if 'moonwalk' in data:\n", " return 'moonwalk'\n", " elif 'hdgo' in data:\n", " return 'hdgo'\n", " elif 'kodik' in data:\n", " return 'kodik'\n", " elif 'videoframe' in data:\n", " return 'videoframe'\n", " elif 'hdnow' in data:\n", " return 'hdnow'\n", " elif 'czx' in data:\n", " return 'czx'\n", " elif 'iframe' in data:\n", " return 'iframe'\n", " elif 'hdbaza' in data:\n", " return 'hdbaza'\n", " elif 'videocdn' in data:\n", " return 'videocdn'\n", " elif 'hdvb' in data:\n", " return 'hdvb'\n", " elif 'collaps' in data:\n", " return 'collaps'\n", " else:\n", " return 'none'\n", "\n", "def prepare(mode, kp_id, orig_title, media_title, image):\n", " search_kp_id = False \n", " if (not kp_id):\n", " kp_id = get_kp_id(media_title, mode)\n", " search_kp_id = True \n", " if (not kp_id):\n", " return None, \"\", \"\", \"\"\n", "\n", " media_title = get_media_title(kp_id, media_title)\n", " if orig_title == None:\n", " orig_title = media_title\n", " image = get_media_image(kp_id)\n", "\n", " return kp_id, orig_title, media_title, image\n", "\n", "def main_(mode, kp_id, orig_title, media_title, image):\n", " kp_id, orig_title, media_title, image = prepare(mode, kp_id, orig_title, media_title, image)\n", " if (not kp_id):\n", " return\n", " #if mode == \"search\":\n", " # process(kp_id, media_title, image)\n", " #else: \n", " film_title = \" %s\" % (orig_title)\n", " uri = sys.argv[0] + '?mode=process&kp_id=%s&media_title=%s&image=%s' % (kp_id, urllib.quote_plus(media_title), urllib.quote_plus(image))\n", " item = xbmcgui.ListItem(film_title, iconImage=image, thumbnailImage=image)\n", " item.setInfo(type='Video', infoLabels={'title': film_title, 'label': film_title, 'plot': film_title})\n", " xbmcplugin.addDirectoryItem(HANDLE, uri, item, True)\n", " xbmcplugin.setContent(HANDLE, 'movies')\n", " xbmcplugin.endOfDirectory(HANDLE, True)\n", "\n", "def process(kp_id, media_title, image):\n", " if (not kp_id):\n", " kp_id, media_title, media_title, image = prepare(\"process\", kp_id, media_title, media_title, image)\n", " if (not kp_id):\n", " return\n", " list_li = []\n", " list_li = search.process(kp_id)\n", " \n", " for li in list_li:\n", " engine = get_engine(li[1].getLabel())\n", " \n", " li[0] = li[0] + (\"&media_title=%s&image=%s&engine=%s\" % ((urllib.quote_plus(encode_(\"utf-8\", media_title))) if (media_title != \"\") else \"\", image, engine))\n", " li[1].setIconImage(image)\n", " li[1].setThumbnailImage(image)\n", " if (\"*T*\" in li[1].getLabel()):\n", " title = li[1].getLabel().replace(\"*T*\", media_title)\n", " li[1].setLabel(title)\n", " li[0] = li[0] + (\"&title=%s\" % (urllib.quote_plus(title)))\n", " \n", " 
li[1].setInfo(type='Video', infoLabels={'title': li[1].getLabel(), 'label': media_title, 'plot': media_title})\n", " xbmcplugin.addDirectoryItem(HANDLE, li[0], li[1], li[2])\n", " xbmcplugin.setContent(HANDLE, 'movies')\n", " xbmcplugin.endOfDirectory(HANDLE, True)\n", "\n", "def show_moonwalk(url, title):\n", " return moonwalk.get_playlist(url)\n", "\n", "def show_hdgo(url, title):\n", " return hdgo.get_playlist(url)\n", "\n", "def show_kodik(url, title):\n", " return kodik.get_playlist(url)\n", "\n", "def show_videoframe(url, title):\n", " return videoframe.get_playlist(url)\n", "\n", "def show_iframe(url, title):\n", " return iframe.get_playlist(url)\n", "\n", "def show_hdbaza(url, title):\n", " return hdbaza.get_playlist(url)\n", "\n", "def show_videocdn(url, title):\n", " return videocdn.get_playlist(url)\n", "\n", "def show_hdvb(url, title):\n", " return hdvb.get_playlist(url)\n", "\n", "def show_collaps(url, title):\n", " return collaps.get_playlist(url)\n", "\n", "def show(url, title, media_title, image, engine):\n", " manifest_links = {} \n", " subtitles = None\n", " if (not media_title):\n", " media_title = title\n", " direct = 0\n", " if ('moonwalk' in engine) or ('hdnow' in engine) or ('czx' in engine):\n", " manifest_links, subtitles, season, episode = show_moonwalk(url, title)\n", " direct = 1\n", " elif 'hdgo' in engine:\n", " manifest_links, subtitles, season, episode = show_hdgo(url, title)\n", " elif 'kodik' in engine:\n", " manifest_links, subtitles, season, episode, direct = show_kodik(url, title)\n", " elif ('videoframe' in engine):\n", " manifest_links, subtitles, season, episode = show_videoframe(url, title)\n", " elif ('iframe' in engine):\n", " manifest_links, subtitles, season, episode = show_iframe(url, title)\n", " elif ('hdbaza' in engine):\n", " manifest_links, subtitles, season, episode = show_hdbaza(url, title)\n", " elif ('videocdn' in engine):\n", " manifest_links, subtitles, season, episode = show_videocdn(url, title)\n", " elif ('hdvb' in engine):\n", " manifest_links, subtitles, season, episode = show_hdvb(url, title)\n", " elif ('collaps' in engine):\n", " manifest_links, subtitles, season, episode = show_collaps(url, title)\n", " direct = 1\n", "\n", " if manifest_links:\n", " list = sorted(manifest_links.iteritems(), key=itemgetter(0))\n", " if season:\n", " title += \" - s%se%s\" % (season.zfill(2), episode.zfill(2))\n", " for quality, link in list:\n", " film_title = \"[COLOR=lightgreen][%s][/COLOR] %s\" % (str(quality), title)\n", " try:\n", " uri = sys.argv[0] + '?mode=play&url=%s&title=%s&media_title=%s&direct=%d' % (urllib.quote_plus(link), urllib.quote_plus(title), urllib.quote_plus(media_title), direct)\n", " except: \n", " uri = sys.argv[0] + '?mode=play&url=%s&title=%s&media_title=%s&direct=%d' % (link, title, media_title, direct) \n", " item = xbmcgui.ListItem(film_title, iconImage=image, thumbnailImage=image)\n", " item.setInfo(type='Video', infoLabels={'title': film_title, 'label': film_title, 'plot': film_title, 'overlay': xbmcgui.ICON_OVERLAY_WATCHED, 'playCount': 0})\n", " item.setProperty('IsPlayable', 'true')\n", " if subtitles: \n", " #urls = re.compile('http:\\/\\/.*?\\.srt').findall(subtitles)\n", " item.setSubtitles([subtitles])\n", " xbmcplugin.addDirectoryItem(HANDLE, uri, item, False)\n", " xbmcplugin.setContent(HANDLE, 'movies')\n", " xbmcplugin.endOfDirectory(HANDLE, True)\n", "\n", "\n", "def play(url, direct):\n", " if (direct != 1) and (\"m3u8\" in url):\n", " url = (\"http:\" if (not ((\"http://\" in url) or 
(\"https://\" in url))) else \"\") + url\n", " response = common.fetchPage({\"link\": url})\n", " if (not ((\"http://\" in response[\"content\"]) or (\"https://\" in response[\"content\"]))):\n", " content = response[\"content\"].split(\"\\n\")\n", " name = os.path.join(PATH.decode(\"utf-8\"), 'resources/playlists/') + \"temp.m3u8\"\n", " block = url.split(\"mp4\")[0]\n", " f = open(name, \"w+\")\n", " for line in content:\n", " if \"mp4\" in line:\n", " line = block + \"mp4\" + line.split(\"mp4\")[-1]\n", " f.write(line + \"\\n\")\n", " f.close()\n", " item = xbmcgui.ListItem(path=name)\n", " else:\n", " item = xbmcgui.ListItem(path=url) \n", " else:\n", " item = xbmcgui.ListItem(path=url) \n", " xbmcplugin.setResolvedUrl(HANDLE, True, item)\n", "\n", "def decode_(code, param):\n", " try:\n", " return param.decode(code)\n", " except:\n", " return param\n", "\n", "def encode_(code, param):\n", " try:\n", " return unicode(param).encode(code)\n", " except:\n", " return param\n", "\n", "def strip_(string):\n", " return common.stripTags(string)\n", "\n", "def replace_(string):\n", " return string.replace(\"&ndash;\", \"/\").replace(\"&nbsp;\", \" \")\n", "\n", "def main():\n", " PARAMS = common.getParameters(sys.argv[2])\n", " kp_id = PARAMS['kp_id'] if ('kp_id' in PARAMS) else None\n", " mode = PARAMS['mode'] if 'mode' in PARAMS else None\n", " url = urllib.unquote_plus(PARAMS['url']) if 'url' in PARAMS else None\n", " title = urllib.unquote_plus(PARAMS['title']) if 'title' in PARAMS else None\n", " orig_title = urllib.unquote_plus(PARAMS['orig_title']) if 'orig_title' in PARAMS else None \n", " media_title = urllib.unquote_plus(PARAMS['media_title']) if 'media_title' in PARAMS else None\n", " if orig_title == None:\n", " orig_title = media_title\n", " image = urllib.unquote_plus(PARAMS['image']) if 'image' in PARAMS else None\n", " direct = int(PARAMS['direct']) if 'direct' in PARAMS else None\n", " engine = PARAMS['engine'] if 'engine' in PARAMS else None\n", "\n", " if (not mode) or (mode == \"context\") or (mode == \"search\"):\n", " main_(mode, kp_id, orig_title, media_title, image)\n", " elif mode == \"process\":\n", " process(kp_id, media_title, image) \n", " elif mode == \"show\":\n", " show(url, title, media_title, image, engine)\n", " elif mode == \"play\":\n", " play(url, direct)\n", "\n", "if __name__ == '__main__':\n", " main()" ]
[ 0, 0.043478260869565216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421, 0, 0.029411764705882353, 0.021739130434782608, 0.02631578947368421, 0, 0.03125, 0.034482758620689655, 0.03333333333333333, 0.03125, 0.03571428571428571, 0.03225806451612903, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0.012345679012345678, 0.012048192771084338, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703, 0, 0, 0, 0.0072992700729927005, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0.011494252873563218, 0.012048192771084338, 0.010309278350515464, 0, 0.008620689655172414, 0.012048192771084338, 0, 0.012048192771084338, 0.010526315789473684, 0.02127659574468085, 0.012048192771084338, 0.009174311926605505, 0.0625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0.036585365853658534, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667, 0.03571428571428571, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017241379310344827, 0.038461538461538464, 0, 0, 0.034482758620689655, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0.017857142857142856, 0.010309278350515464, 0, 0, 0.038461538461538464, 0, 0.13333333333333333, 0, 0.0070921985815602835, 0, 0.009433962264150943, 0, 0, 0, 0, 0.025, 0, 0.009259259259259259, 0, 0, 0, 0, 0.2, 0, 0, 0.1111111111111111, 0.006097560975609756, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0.008403361344537815, 0, 0, 0, 0, 0.03225806451612903, 0, 0, 0.037037037037037035, 0, 0, 0.03571428571428571, 0, 0, 0.030303030303030304, 0, 0, 0.034482758620689655, 0, 0, 0.034482758620689655, 0, 0, 0.03225806451612903, 0, 0, 0.037037037037037035, 0, 0, 0.03333333333333333, 0, 0, 0.02, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0.005434782608695652, 0.08333333333333333, 0.014388489208633094, 0.011494252873563218, 0.005847953216374269, 0, 0.037037037037037035, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0, 0, 0.01098901098901099, 0, 0.010638297872340425, 0, 0.010869565217391304, 0, 0, 0, 0.030303030303030304, 0.015625, 0.027777777777777776, 0, 0, 0, 0.02127659574468085, 0, 0.023255813953488372, 0, 0, 0.038461538461538464, 0, 0, 0.08333333333333333, 0, 0, 0.038461538461538464, 0, 0, 0.08333333333333333, 0, 0, 0.05, 0, 0, 0.045454545454545456, 0, 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0.020202020202020204, 0.01020408163265306, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085, 0, 0, 0, 0, 0, 0.037037037037037035, 0.1 ]
353
0.008999
[ "# Copyright (c) 2006-2007 Open Source Applications Foundation\r\n", "#\r\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n", "# you may not use this file except in compliance with the License.\r\n", "# You may obtain a copy of the License at\r\n", "#\r\n", "# http://www.apache.org/licenses/LICENSE-2.0\r\n", "#\r\n", "# Unless required by applicable law or agreed to in writing, software\r\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n", "# See the License for the specific language governing permissions and\r\n", "# limitations under the License.\r\n", "#\r\n", "# Modified 2010-11-02, Martin Wendt:\r\n", "# Taken from http://chandlerproject.org/Projects/Davclient\r\n", "# - Fixed set_lock, proppatch\r\n", "# - Added (tag, value) syntax to object_to_etree \r\n", "# - Added checkResponse()\r\n", "\r\n", "import urlparse, httplib, copy, base64, StringIO\r\n", "\r\n", "try:\r\n", " from xml.etree import ElementTree\r\n", "except:\r\n", " from elementtree import ElementTree\r\n", "\r\n", "__all__ = ['DAVClient']\r\n", "\r\n", "class AppError(Exception):\r\n", " pass\r\n", "\r\n", "def object_to_etree(parent, obj, namespace=''):\r\n", " \"\"\"This function takes in a python object, traverses it, and adds it to an existing etree object\"\"\"\r\n", " \r\n", " if type(obj) is int or type(obj) is float or type(obj) is str:\r\n", " # If object is a string, int, or float just add it\r\n", " obj = str(obj)\r\n", " if obj.startswith('{') is False:\r\n", " ElementTree.SubElement(parent, '{%s}%s' % (namespace, obj))\r\n", " else:\r\n", " ElementTree.SubElement(parent, obj)\r\n", " \r\n", " elif type(obj) is dict:\r\n", " # If the object is a dictionary we'll need to parse it and send it back recusively\r\n", " for key, value in obj.items():\r\n", " if key.startswith('{') is False:\r\n", " key_etree = ElementTree.SubElement(parent, '{%s}%s' % (namespace, key))\r\n", " object_to_etree(key_etree, value, namespace=namespace)\r\n", " else:\r\n", " key_etree = ElementTree.SubElement(parent, key)\r\n", " object_to_etree(key_etree, value, namespace=namespace)\r\n", " \r\n", " elif type(obj) is list:\r\n", " # If the object is a list parse it and send it back recursively\r\n", " for item in obj:\r\n", " object_to_etree(parent, item, namespace=namespace)\r\n", " \r\n", " elif type(obj) is tuple and len(obj) == 2:\r\n", " # If the object is a a tuple, assume (tag_name, value)\r\n", " ElementTree.SubElement(parent, obj[0]).text = str(obj[1])\r\n", " \r\n", " else:\r\n", " # If it's none of previous types then raise\r\n", " raise TypeError, '%s is an unsupported type' % type(obj)\r\n", " \r\n", "\r\n", "class DAVClient(object):\r\n", " \r\n", " def __init__(self, url='http://localhost:8080'):\r\n", " \"\"\"Initialization\"\"\"\r\n", " \r\n", " self._url = urlparse.urlparse(url)\r\n", " \r\n", " self.headers = {'Host':self._url[1], \r\n", " 'User-Agent': 'python.davclient.DAVClient/0.1'} \r\n", "\r\n", " \r\n", " def _request(self, method, path='', body=None, headers=None):\r\n", " \"\"\"Internal request method\"\"\"\r\n", " self.response = None\r\n", "\r\n", " if headers is None:\r\n", " headers = copy.copy(self.headers)\r\n", " else:\r\n", " new_headers = copy.copy(self.headers)\r\n", " new_headers.update(headers)\r\n", " headers = new_headers\r\n", "\r\n", " # keep request info for later checks\r\n", " self.request = {\"method\": method,\r\n", " \"path\": path,\r\n", " 
\"headers\": headers,\r\n", " }\r\n", "\r\n", " if self._url.scheme == 'http':\r\n", " self._connection = httplib.HTTPConnection(self._url[1])\r\n", " elif self._url.scheme == 'https':\r\n", " self._connection = httplib.HTTPSConnection(self._url[1])\r\n", " else:\r\n", " raise Exception, 'Unsupported scheme'\r\n", " \r\n", " self._connection.request(method, path, body, headers)\r\n", " \r\n", " self.response = self._connection.getresponse()\r\n", "\r\n", " self.response.body = self.response.read()\r\n", " \r\n", " # Try to parse and get an etree\r\n", " try:\r\n", " self._get_response_tree()\r\n", " except:\r\n", " pass\r\n", " \r\n", " \r\n", " def _get_response_tree(self):\r\n", " \"\"\"Parse the response body into an elementree object\"\"\"\r\n", " self.response.tree = ElementTree.fromstring(self.response.body)\r\n", " return self.response.tree\r\n", " \r\n", " def set_basic_auth(self, username, password):\r\n", " \"\"\"Set basic authentication\"\"\"\r\n", " auth = 'Basic %s' % base64.encodestring('%s:%s' % (username, password)).strip()\r\n", " self._username = username\r\n", " self._password = password\r\n", " self.headers['Authorization'] = auth\r\n", " \r\n", " ## HTTP DAV methods ##\r\n", " \r\n", " def get(self, path, headers=None):\r\n", " \"\"\"Simple get request\"\"\"\r\n", " self._request('GET', path, headers=headers)\r\n", " return self.response.body\r\n", " \r\n", " def head(self, path, headers=None):\r\n", " \"\"\"Basic HEAD request\"\"\"\r\n", " self._request('HEAD', path, headers=headers)\r\n", " \r\n", " def put(self, path, body=None, f=None, headers=None):\r\n", " \"\"\"Put resource with body\"\"\"\r\n", " if f is not None:\r\n", " body = f.read()\r\n", " \r\n", " self._request('PUT', path, body=body, headers=headers)\r\n", " \r\n", " def post(self, path, body=None, headers=None):\r\n", " \"\"\"POST resource with body\"\"\"\r\n", "\r\n", " self._request('POST', path, body=body, headers=headers)\r\n", " \r\n", " def mkcol(self, path, headers=None):\r\n", " \"\"\"Make DAV collection\"\"\"\r\n", " self._request('MKCOL', path=path, headers=headers)\r\n", " \r\n", " make_collection = mkcol\r\n", " \r\n", " def delete(self, path, headers=None):\r\n", " \"\"\"Delete DAV resource\"\"\"\r\n", " self._request('DELETE', path=path, headers=headers)\r\n", " \r\n", " def copy(self, source, destination, body=None, depth='infinity', overwrite=True, headers=None):\r\n", " \"\"\"Copy DAV resource\"\"\"\r\n", " # Set all proper headers\r\n", " if headers is None:\r\n", " headers = {'Destination':destination}\r\n", " else:\r\n", " headers['Destination'] = self._url.geturl() + destination\r\n", " if overwrite is False:\r\n", " headers['Overwrite'] = 'F'\r\n", " headers['Depth'] = depth\r\n", " \r\n", " self._request('COPY', source, body=body, headers=headers)\r\n", " \r\n", " def copy_collection(self, source, destination, depth='infinity', overwrite=True, headers=None):\r\n", " \"\"\"Copy DAV collection.\r\n", " \r\n", " Note: support for the 'propertybehavior' request body for COPY and MOVE \r\n", " has been removed with RFC4918\r\n", " \"\"\"\r\n", " body = '<?xml version=\"1.0\" encoding=\"utf-8\" ?><d:propertybehavior xmlns:d=\"DAV:\"><d:keepalive>*</d:keepalive></d:propertybehavior>'\r\n", " \r\n", " # Add proper headers\r\n", " if headers is None:\r\n", " headers = {}\r\n", " headers['Content-Type'] = 'text/xml; charset=\"utf-8\"'\r\n", " \r\n", " self.copy(source, destination, body=unicode(body, 'utf-8'), depth=depth, overwrite=overwrite, headers=headers)\r\n", " \r\n", " \r\n", " def 
move(self, source, destination, body=None, depth='infinity', overwrite=True, headers=None):\r\n", " \"\"\"Move DAV resource\"\"\"\r\n", " # Set all proper headers\r\n", " if headers is None:\r\n", " headers = {'Destination':destination}\r\n", " else:\r\n", " headers['Destination'] = self._url.geturl() + destination\r\n", " if overwrite is False:\r\n", " headers['Overwrite'] = 'F'\r\n", " headers['Depth'] = depth\r\n", " \r\n", " self._request('MOVE', source, body=body, headers=headers)\r\n", " \r\n", " \r\n", " def move_collection(self, source, destination, depth='infinity', overwrite=True, headers=None):\r\n", " \"\"\"Move DAV collection and copy all properties.\r\n", "\r\n", " Note: support for the 'propertybehavior' request body for COPY and MOVE \r\n", " has been removed with RFC4918\r\n", " \"\"\"\r\n", " body = '<?xml version=\"1.0\" encoding=\"utf-8\" ?><d:propertybehavior xmlns:d=\"DAV:\"><d:keepalive>*</d:keepalive></d:propertybehavior>'\r\n", " \r\n", " # Add proper headers\r\n", " if headers is None:\r\n", " headers = {}\r\n", " headers['Content-Type'] = 'text/xml; charset=\"utf-8\"'\r\n", "\r\n", " self.move(source, destination, unicode(body, 'utf-8'), depth=depth, overwrite=overwrite, headers=headers)\r\n", " \r\n", " \r\n", " def propfind(self, path, properties='allprop', namespace='DAV:', depth=None, headers=None):\r\n", " \"\"\"Property find. If properties arg is unspecified it defaults to 'allprop'\"\"\"\r\n", " # Build propfind xml\r\n", " root = ElementTree.Element('{DAV:}propfind')\r\n", " if type(properties) is str:\r\n", " ElementTree.SubElement(root, '{DAV:}%s' % properties)\r\n", " else:\r\n", " props = ElementTree.SubElement(root, '{DAV:}prop')\r\n", " object_to_etree(props, properties, namespace=namespace)\r\n", " tree = ElementTree.ElementTree(root)\r\n", " \r\n", " # Etree won't just return a normal string, so we have to do this\r\n", " body = StringIO.StringIO()\r\n", " tree.write(body)\r\n", " body = body.getvalue()\r\n", " \r\n", " # Add proper headers\r\n", " if headers is None:\r\n", " headers = {}\r\n", " if depth is not None:\r\n", " headers['Depth'] = depth\r\n", " headers['Content-Type'] = 'text/xml; charset=\"utf-8\"'\r\n", " \r\n", " # Body encoding must be utf-8, 207 is proper response\r\n", " self._request('PROPFIND', path, body=unicode('<?xml version=\"1.0\" encoding=\"utf-8\" ?>\\n'+body, 'utf-8'), headers=headers)\r\n", " \r\n", " if self.response is not None and hasattr(self.response, 'tree') is True:\r\n", " property_responses = {}\r\n", " for response in self.response.tree._children:\r\n", " property_href = response.find('{DAV:}href')\r\n", " property_stat = response.find('{DAV:}propstat')\r\n", " \r\n", " def parse_props(props):\r\n", " property_dict = {}\r\n", " for prop in props:\r\n", " if prop.tag.find('{DAV:}') is not -1:\r\n", " name = prop.tag.split('}')[-1]\r\n", " else:\r\n", " name = prop.tag\r\n", " if len(prop._children) is not 0:\r\n", " property_dict[name] = parse_props(prop._children)\r\n", " else:\r\n", " property_dict[name] = prop.text\r\n", " return property_dict\r\n", " \r\n", " if property_href is not None and property_stat is not None:\r\n", " property_dict = parse_props(property_stat.find('{DAV:}prop')._children)\r\n", " property_responses[property_href.text] = property_dict\r\n", " return property_responses\r\n", " \r\n", " def proppatch(self, path, set_props=None, remove_props=None, namespace='DAV:', headers=None):\r\n", " \"\"\"Patch properties on a DAV resource. 
If namespace is not specified the DAV namespace is used for all properties\"\"\"\r\n", " root = ElementTree.Element('{DAV:}propertyupdate')\r\n", " \r\n", " if set_props is not None:\r\n", " prop_set = ElementTree.SubElement(root, '{DAV:}set')\r\n", " for p in set_props:\r\n", " prop_prop = ElementTree.SubElement(prop_set, '{DAV:}prop')\r\n", " object_to_etree(prop_prop, p, namespace=namespace) \r\n", " if remove_props is not None:\r\n", " prop_remove = ElementTree.SubElement(root, '{DAV:}remove')\r\n", " for p in remove_props:\r\n", " prop_prop = ElementTree.SubElement(prop_remove, '{DAV:}prop')\r\n", " object_to_etree(prop_prop, p, namespace=namespace) \r\n", " \r\n", " tree = ElementTree.ElementTree(root)\r\n", " # Etree won't just return a normal string, so we have to do this\r\n", " body = StringIO.StringIO()\r\n", " tree.write(body)\r\n", " body = body.getvalue()\r\n", "\r\n", " # Add proper headers\r\n", " if headers is None:\r\n", " headers = {}\r\n", " headers['Content-Type'] = 'text/xml; charset=\"utf-8\"'\r\n", " \r\n", " self._request('PROPPATCH', path, body=unicode('<?xml version=\"1.0\" encoding=\"utf-8\" ?>\\n'+body, 'utf-8'), headers=headers)\r\n", " \r\n", " \r\n", " def set_lock(self, path, owner, locktype='write', lockscope='exclusive', depth=None, headers=None):\r\n", " \"\"\"Set a lock on a dav resource\"\"\"\r\n", " root = ElementTree.Element('{DAV:}lockinfo')\r\n", " object_to_etree(root, {'locktype':locktype, 'lockscope':lockscope, 'owner':{'href':owner}}, namespace='DAV:')\r\n", " tree = ElementTree.ElementTree(root)\r\n", " \r\n", " # Add proper headers\r\n", " if headers is None:\r\n", " headers = {}\r\n", " if depth is not None:\r\n", " headers['Depth'] = depth\r\n", " headers['Content-Type'] = 'text/xml; charset=\"utf-8\"'\r\n", " headers['Timeout'] = 'Infinite, Second-4100000000'\r\n", " \r\n", " # Etree won't just return a normal string, so we have to do this\r\n", " body = StringIO.StringIO()\r\n", " tree.write(body)\r\n", " body = body.getvalue()\r\n", " \r\n", " self._request('LOCK', path, body=unicode('<?xml version=\"1.0\" encoding=\"utf-8\" ?>\\n'+body, 'utf-8'), headers=headers)\r\n", " \r\n", " locks = self.response.tree.findall('.//{DAV:}locktoken')\r\n", " lock_list = []\r\n", " for lock in locks:\r\n", " lock_list.append(lock.getchildren()[0].text.strip().strip('\\n'))\r\n", " return lock_list\r\n", " \r\n", "\r\n", " def refresh_lock(self, path, token, headers=None):\r\n", " \"\"\"Refresh lock with token\"\"\"\r\n", " \r\n", " if headers is None:\r\n", " headers = {}\r\n", " headers['If'] = '(<%s>)' % token\r\n", " headers['Timeout'] = 'Infinite, Second-4100000000'\r\n", " \r\n", " self._request('LOCK', path, body=None, headers=headers)\r\n", " \r\n", " \r\n", " def unlock(self, path, token, headers=None):\r\n", " \"\"\"Unlock DAV resource with token\"\"\"\r\n", " if headers is None:\r\n", " headers = {}\r\n", " headers['Lock-Token'] = '<%s>' % token\r\n", " \r\n", " self._request('UNLOCK', path, body=None, headers=headers)\r\n", "\r\n", "\r\n", " def checkResponse(self, status=None):\r\n", " \"\"\"Raise an error, if self.response doesn't match expected status.\r\n", " \r\n", " Inspired by paste.fixture\r\n", " \"\"\"\r\n", " __tracebackhide__ = True\r\n", " res = self.response\r\n", " full_status = \"%s %s\" % (res.status, res.reason)\r\n", "\r\n", " # Check response Content_Length\r\n", " content_length = long(res.getheader(\"content-length\", 0))\r\n", " if content_length and len(res.body) != content_length:\r\n", " raise AppError(\"Mismatch: 
Content_Length(%s) != len(body)(%s)\" % (content_length, len(res.body)))\r\n", "\r\n", " # From paste.fixture:\r\n", " if status == '*':\r\n", " return\r\n", " if isinstance(status, (list, tuple)):\r\n", " if res.status not in status:\r\n", " raise AppError(\r\n", " \"Bad response: %s (not one of %s for %s %s)\\n%s\"\r\n", " % (full_status, ', '.join(map(str, status)),\r\n", " self.request[\"method\"], self.request[\"path\"], res.body))\r\n", " return\r\n", " if status is None:\r\n", " if res.status >= 200 and res.status < 400:\r\n", " return\r\n", " raise AssertionError(\r\n", " \"Bad response: %s (not 200 OK or 3xx redirect for %s %s)\\n%s\"\r\n", " % (full_status, self.request[\"method\"], self.request[\"path\"],\r\n", " res.body))\r\n", " if status != res.status:\r\n", " raise AppError(\"Bad response: %s (not %s)\" % (full_status, status))\r\n", "\r\n", "\r\n", " def checkMultiStatusResponse(self, expect_status=200):\r\n", " \"\"\"\"\"\"\r\n", " if isinstance(expect_status, tuple):\r\n", " pass\r\n", " elif not isinstance(expect_status, list):\r\n", " expect_status = [ expect_status ]\r\n", " expect_status = [int(s) for s in expect_status]\r\n", " \r\n", " self.checkResponse(207)\r\n", " if not hasattr(self.response, 'tree'):\r\n", " raise AppError(\"Bad response: not XML\")\r\n", " responses = {}\r\n", " for response in self.response.tree._children:\r\n", " href = response.find('{DAV:}href')\r\n", " pstat = response.find('{DAV:}propstat')\r\n", " if pstat:\r\n", " stat = pstat.find('{DAV:}status')\r\n", " else:\r\n", " stat = response.find('{DAV:}status')\r\n", " # 'HTTP/1.1 200 OK' -> 200\r\n", " statuscode = int(stat.text.split(\" \", 2)[1])\r\n", " responses.setdefault(statuscode, []).append(href.text)\r\n", " for statuscode, hrefs in responses.items():\r\n", " if not statuscode in expect_status:\r\n", " raise AppError(\"Invalid multistatus %s for %s (expected %s)\\n%s\" % (statuscode, hrefs, expect_status, responses))\r\n" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0196078431372549, 0, 0, 0.02, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0, 0.03571428571428571, 0, 0, 0.02040816326530612, 0.009523809523809525, 0.16666666666666666, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.010869565217391304, 0, 0, 0.011235955056179775, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0, 0.07142857142857142, 0, 0, 0, 0.07142857142857142, 0, 0, 0.015151515151515152, 0.1, 0, 0, 0.16666666666666666, 0, 0, 0.1, 0, 0.1, 0.0425531914893617, 0.013513513513513514, 0, 0.1, 0.014925373134328358, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0196078431372549, 0.1, 0, 0.07142857142857142, 0, 0, 0, 0.1, 0, 0, 0, 0.058823529411764705, 0, 0.1, 0.07142857142857142, 0.02857142857142857, 0, 0, 0, 0.1, 0, 0, 0.011235955056179775, 0, 0, 0, 0.1, 0.03571428571428571, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.07142857142857142, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0.1, 0.009900990099009901, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0.1, 0.009900990099009901, 0, 0.1, 0.012195121951219513, 0, 0, 0.007042253521126761, 0.1, 0, 0, 0, 0, 0.1, 0.008333333333333333, 0.1, 0.1, 0.019801980198019802, 0, 0, 0, 0.0196078431372549, 0, 0, 0, 0, 0, 0.07142857142857142, 0, 0.1, 0.1, 0.019801980198019802, 0, 0, 0.012195121951219513, 0, 0, 0.007042253521126761, 0.1, 0, 0, 0, 0, 0, 0.008695652173913044, 0.1, 0.1, 0.020618556701030927, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.007633587786259542, 0.1, 0.012195121951219513, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555, 0, 0.010752688172043012, 0, 0, 0.1, 0.010101010101010102, 0.007936507936507936, 0, 0.1, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0.011764705882352941, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.007575757575757576, 0.1, 0.1, 0.01904761904761905, 0, 0, 0.04201680672268908, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.05555555555555555, 0.007874015748031496, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.017857142857142856, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0.1, 0.1, 0.02, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.023255813953488372, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.016666666666666666, 0, 0, 0, 0, 0.0425531914893617, 0, 0.07142857142857142, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612, 0.007633587786259542 ]
406
0.017165