commit
stringlengths
40
40
subject
stringlengths
1
3.25k
old_file
stringlengths
4
311
new_file
stringlengths
4
311
old_contents
stringlengths
0
26.3k
lang
stringclasses
3 values
proba
float64
0
1
diff
stringlengths
0
7.82k
51ae4907ce5e2c3352ebe8e9a2a7b41a243cab3d
Add note about pattern constraint use
jsontableschema/constraints.py
jsontableschema/constraints.py
import re from dateutil.parser import parse as date_parse from . import compat from . import exceptions class NoConstraintsSupportedMixin(object): '''All constraints raise a ConstraintNotSupported exception''' def _raise_constraint_not_supported(self, field_type, constraint): raise exceptions.ConstraintNotSupported( msg="Field type '{0}' does not support the {1} constraint" .format(field_type, constraint)) def check_minLength(self, value, min_length): self._raise_constraint_not_supported(self.name, 'minLength') def check_maxLength(self, value, max_length): self._raise_constraint_not_supported(self.name, 'maxLength') def check_minimum(self, value, minimum): self._raise_constraint_not_supported(self.name, 'minimum') def check_maximum(self, value, maximum): self._raise_constraint_not_supported(self.name, 'maximum') def check_enum(self, value, enum): self._raise_constraint_not_supported(self.name, 'enum') def check_pattern(self, value, enum): self._raise_constraint_not_supported(self.name, 'pattern') class LengthConstraintMixin(object): ''' Only applicable to sequences like string and array. Will raise TypeError if applied to other types. None applicable types should override and raise ConstraintNotSupported exception. ''' def check_minLength(self, value, min_length): if min_length is not None and len(value) < min_length: raise exceptions.ConstraintError( msg="The field '{0}' must have a minimum length of {1}" .format(self.field_name, min_length)) def check_maxLength(self, value, max_length): if max_length is not None and len(value) > max_length: raise exceptions.ConstraintError( msg="The field '{0}' must have a maximum length of {1}" .format(self.field_name, max_length)) class MinMaxConstraintMixin(object): ''' Only applicable to numbers and date/times. Will raise TypeError if applied to other types. None applicable types should override and raise ConstraintNotSupported exception. ''' def check_minimum(self, value, minimum): if minimum is not None: if self.name in ('date', 'datetime', 'time'): minimum = date_parse(minimum, ignoretz=True) if self.name == 'date': minimum = minimum.date() if self.name == 'time': minimum = minimum.time() if value < minimum: raise exceptions.ConstraintError( msg="The field '{0}' must not be less than {1}" .format(self.field_name, minimum)) def check_maximum(self, value, maximum): if maximum is not None: if self.name in ('date', 'datetime', 'time'): maximum = date_parse(maximum, ignoretz=True) if self.name == 'date': maximum = maximum.date() if self.name == 'time': maximum = maximum.time() if value > maximum: raise exceptions.ConstraintError( msg="The field '{0}' must not be more than {1}" .format(self.field_name, maximum)) class EnumConstraintMixin(object): def _cast_enum(self, enum): ''' Cast each member of the enum array as the same type and format of self. This ensures we're comparing like for like. Don't apply the type's constraints for this cast. ''' return [self.cast(m, skip_constraints=True) for m in enum] def check_enum(self, value, enum): if value not in self._cast_enum(enum): raise exceptions.ConstraintError( msg="The value for field '{0}' must be in the enum array" .format(self.field_name)) class PatternConstraintMixin(object): def check_pattern(self, value, pattern): '''`value` is treated as a string and must match the XML Schema Reg Exp `pattern`.''' # convert to str if necessary if not isinstance(value, compat.str): value = compat.str(value) p = re.compile('^{0}$'.format(pattern)) p_match = p.match(value) if not p_match: raise exceptions.ConstraintError( msg="The value for field '{0}' must match the pattern" .format(self.field_name))
Python
0
@@ -3937,32 +3937,127 @@ Mixin(object):%0A%0A + '''Pattern constraint should be checked as a string value before the value%0A is cast'''%0A%0A def check_pa
f3578096219dbb82572063c8a6dbb75be4da07ac
Update P03_combinePDFs fixed reading encrypted files
books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py
books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py
#! python3 # combinePdfs.py - Combines all the PDFs in the current working directory into # a single PDF. import PyPDF4, os # Get all the PDF filenames. pdfFiles = [] for filename in os.listdir('.'): if filename.endswith(".pdf"): pdfFiles.append(filename) pdfFiles.sort(key=str.lower) pdfWriter = PyPDF4.PdfFileWriter() # Loop through all the PDF files. for filename in pdfFiles: pdfFileObj = open(filename, "rb") pdfReader = PyPDF4.PdfFileReader(pdfFileObj) # Loop through all the pages (except the first) and add them. for pageNum in range(1, pdfReader.numPages): pageObj = pdfReader.getPage(pageNum) pdfWriter.addPage(pageObj) # Save the resulting PDF to a file. pdfOutput = open("allminutes.pdf", "wb") pdfWriter.write(pdfOutput) pdfOutput.close()
Python
0
@@ -476,16 +476,223 @@ ileObj)%0A + if pdfReader.isEncrypted and filename == %22encrypted.pdf%22:%0A pdfReader.decrypt(%22rosebud%22)%0A if pdfReader.isEncrypted and filename == %22encryptedminutes.pdf%22:%0A pdfReader.decrypt(%22swordfish%22)%0A # Lo
7bd54dda24f26662a1525c4b5a76374dbdbc1686
add test for watch
test/test_blacklists.py
test/test_blacklists.py
#!/usr/bin/env python3 # coding=utf-8 import yaml from os import unlink import pytest from blacklists import * from helpers import files_changed, blacklist_integrity_check def test_blacklist_integrity(): errors = blacklist_integrity_check() if len(errors) == 1: pytest.fail(errors[0]) elif len(errors) > 1: pytest.fail("\n\t".join(["{} errors has occurred:".format(len(errors))] + errors)) def test_remote_diff(): file_set = set("abcdefg") true_diff = "a c k p" false_diff = "h j q t" assert files_changed(true_diff, file_set) assert not files_changed(false_diff, file_set) def yaml_validate_existing(cls, filename, parser): return cls(filename, parser).validate() def test_yaml_blacklist(): with open('test_ip.yml', 'w') as y: yaml.dump({ 'Schema': 'yaml_cidr', 'Schema_version': '2019120601', 'items': [ {'ip': '1.2.3.4'}, {'ip': '2.3.4.5', 'disable': True}, {'ip': '3.4.5.6', 'comment': 'comment'}, ]}, y) blacklist = NetWatchlist('test_ip.yml', YAMLParserCIDR) with pytest.raises(ValueError) as e: blacklist.add('1.3.34') with pytest.raises(ValueError) as e: blacklist.add({'ip': '1.3.4'}) with pytest.raises(ValueError) as e: blacklist.add({'ip': '1.2.3.4'}) with pytest.raises(ValueError) as e: blacklist.add({'ip': '2.3.4.5'}) with pytest.raises(ValueError) as e: blacklist.delete({'ip': '34.45.56.67'}) blacklist.add({'ip': '1.3.4.5'}) assert '1.2.3.4' in blacklist.parse() assert '2.3.4.5' not in blacklist.parse() assert '3.4.5.6' in blacklist.parse() blacklist.delete({'ip': '3.4.5.6'}) assert '3.4.5.6' not in blacklist.parse() unlink('test_ip.yml') yaml_validate_existing(NetBlacklist, 'blacklisted_cidrs.yml', YAMLParserCIDR) yaml_validate_existing(NetWatchlist, 'watched_cidrs.yml', YAMLParserCIDR) def test_yaml_asn(): with open('test_asn.yml', 'w') as y: yaml.dump({ 'Schema': 'yaml_asn', 'Schema_version': '2019120601', 'items': [ {'asn': '123'}, {'asn': '234', 'disable': True}, {'asn': '345', 'comment': 'comment'}, ]}, y) blacklist = NetBlacklist('test_asn.yml', YAMLParserASN) with pytest.raises(ValueError) as e: blacklist.add('123') with pytest.raises(ValueError) as e: blacklist.add({'asn': 'invalid'}) with pytest.raises(ValueError) as e: blacklist.add({'asn': '123'}) with pytest.raises(ValueError) as e: blacklist.add({'asn': '234'}) with pytest.raises(ValueError) as e: blacklist.delete({'asn': '9897'}) assert '123' in blacklist.parse() assert '234' not in blacklist.parse() assert '345' in blacklist.parse() blacklist.delete({'asn': '345'}) assert '345' not in blacklist.parse() unlink('test_asn.yml') yaml_validate_existing(NetWatchlist, 'watched_asns.yml', YAMLParserASN) def test_yaml_nses(): with open('test_nses.yml', 'w') as y: yaml.dump({ 'Schema': 'yaml_ns', 'Schema_version': '2019120601', 'items': [ {'ns': 'example.com.'}, {'ns': 'example.net.', 'disable': True}, {'ns': 'example.org.', 'comment': 'comment'}, ]}, y) blacklist = NetBlacklist('test_nses.yml', YAMLParserNS) assert 'example.com.' in blacklist.parse() assert 'EXAMPLE.COM.' not in blacklist.parse() with pytest.raises(ValueError) as e: blacklist.add({'ns': 'example.com.'}) with pytest.raises(ValueError) as e: blacklist.add({'ns': 'EXAMPLE.COM.'}) assert 'example.net.' not in blacklist.parse() assert 'example.org.' in blacklist.parse() blacklist.delete({'ns': 'example.org.'}) assert 'example.org.' not in blacklist.parse() unlink('test_nses.yml') yaml_validate_existing(NetBlacklist, 'blacklisted_nses.yml', YAMLParserNS) yaml_validate_existing(NetWatchlist, 'watched_nses.yml', YAMLParserNS)
Python
0
@@ -4127,28 +4127,816 @@ ed_nses.yml', YAMLParserNS)%0A +%0A%0Adef test_tsv_watchlist():%0A with open('test_watched_keywords.txt', 'w') as t:%0A t.write('%5Ct'.join(%5B'1495006487', 'tripleee', 'one two%5Cn'%5D))%0A t.write('%5Ct'.join(%5B'1495006488', 'tripleee', 'three four%5Cn'%5D))%0A watchlist = Watchlist('test_watched_keywords.txt', TSVDictParser)%0A parsed = list(watchlist.parse())%0A assert 'one two' in parsed%0A assert 'one' not in parsed%0A assert 'three four' in parsed%0A assert 'five six' not in parsed%0A with pytest.raises(ValueError) as e:%0A watchlist.add('one two', who='tripleee', when=1495006489)%0A with pytest.raises(ValueError) as e:%0A watchlist.add('five six')%0A watchlist.add('five six', who='tripleee', when=1495006490)%0A assert 'five six' in watchlist.parse()%0A unlink('test_watched_keywords.txt')%0A
4726cd620bb764ecb7a2b2b5848842a45437f63b
Add `check_status` to config parameter [aws add-on]
jumeaux/addons/final/aws.py
jumeaux/addons/final/aws.py
# -*- coding:utf-8 -*- """For example of config final: - name: jumeaux.addons.final.aws config: table: jumeaux-report bucket: jumeaux-report cache_max_age: 600 """ import logging import shutil from decimal import Decimal import boto3 import os import json from owlmixin import OwlMixin from jumeaux.addons.final import FinalExecutor from jumeaux.models import Report, OutputSummary, FinalAddOnPayload logger = logging.getLogger(__name__) class Config(OwlMixin): def __init__(self, table, bucket, cache_max_age=0, with_zip=True, assumed_role_arn=None, checklist=None): self.table: str = table self.bucket: str = bucket self.cache_max_age: int = cache_max_age self.with_zip = with_zip self.assumed_role_arn = assumed_role_arn self.checklist = checklist class Executor(FinalExecutor): def __init__(self, config: dict): self.config: Config = Config.from_dict(config or {}) def exec(self, payload: FinalAddOnPayload) -> FinalAddOnPayload: report: Report = payload.report output_summary: OutputSummary = payload.output_summary tmp_credential = boto3.client('sts').assume_role( RoleArn=self.config.assumed_role_arn, RoleSessionName='jumeaux_with_aws_add-on' ) if self.config.assumed_role_arn else None # dynamo dynamodb = boto3.resource('dynamodb', **({ 'aws_access_key_id': tmp_credential['Credentials']['AccessKeyId'], 'aws_secret_access_key': tmp_credential['Credentials']['SecretAccessKey'], 'aws_session_token': tmp_credential['Credentials']['SessionToken'] } if tmp_credential else {})) table = dynamodb.Table(self.config.table) item = { "hashkey": report.key, "title": report.title, "one_host": report.summary.one.host, "other_host": report.summary.other.host, "paths": set(report.summary.paths), "same_count": Decimal(report.summary.status.same), "different_count": Decimal(report.summary.status.different), "failure_count": Decimal(report.summary.status.failure), "begin_time": report.summary.time.start, "end_time": report.summary.time.end, "with_zip": self.config.with_zip, "retry_hash": report.retry_hash } if report.description: item['description'] = report.description if self.config.checklist: item['checklist'] = self.config.checklist table.put_item(Item=item) # s3 s3 = boto3.client('s3', **({ 'aws_access_key_id': tmp_credential['Credentials']['AccessKeyId'], 'aws_secret_access_key': tmp_credential['Credentials']['SecretAccessKey'], 'aws_session_token': tmp_credential['Credentials']['SessionToken'] } if tmp_credential else {})) def upload_responses(which: str): dir = f'{output_summary.response_dir}/{report.key}' for file in os.listdir(f'{dir}/{which}'): with open(f'{dir}/{which}/{file}', 'rb') as f: logger.info(f'Put {dir}/{which}/{file}') s3.put_object(Bucket=self.config.bucket, Key=f'jumeaux-results/{report.key}/{which}/{file}', Body=f.read(), CacheControl=f'max-age={self.config.cache_max_age}') # report # TODO: Immutable... d = report.to_dict() del d['trials'] s3.put_object(Bucket=self.config.bucket, Key=f'jumeaux-results/{report.key}/report-without-trials.json', Body=json.dumps(d, ensure_ascii=False)) s3.put_object(Bucket=self.config.bucket, Key=f'jumeaux-results/{report.key}/trials.json', Body=report.trials.to_json()) # details upload_responses("one") upload_responses("other") # zip (${hashkey}.zip) if self.config.with_zip: base_name = f'{output_summary.response_dir}/{report.key}' with open(f'{base_name}/report.json', 'w', encoding=output_summary.encoding) as f: f.write(report.to_pretty_json()) shutil.make_archive(base_name, 'zip', f'{output_summary.response_dir}/{report.key}') zip_fullpath = f'{base_name}.zip' with open(zip_fullpath, 'rb') as f: logger.info(f'Put {zip_fullpath}') s3.put_object(Bucket=self.config.bucket, Key=f'jumeaux-results/{report.key}/{report.key[0:7]}.zip', Body=f.read(), CacheControl=f'max-age={self.config.cache_max_age}') os.remove(zip_fullpath) return payload
Python
0
@@ -2374,16 +2374,52 @@ try_hash +,%0A %22check_status%22: 'todo' %0A
f71ab4f8db04a569543fa5aa6ce05ee9f95479e9
Add Neutron CLI tests to tempest
tempest/cli/__init__.py
tempest/cli/__init__.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shlex import subprocess from oslo.config import cfg import tempest.cli.output_parser from tempest.common import log as logging import tempest.test LOG = logging.getLogger(__name__) cli_opts = [ cfg.BoolOpt('enabled', default=True, help="enable cli tests"), cfg.StrOpt('cli_dir', default='/usr/local/bin/', help="directory where python client binaries are located"), ] CONF = cfg.CONF cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options") CONF.register_group(cli_group) CONF.register_opts(cli_opts, group=cli_group) class ClientTestBase(tempest.test.BaseTestCase): @classmethod def setUpClass(cls): if not CONF.cli.enabled: msg = "cli testing disabled" raise cls.skipException(msg) cls.identity = cls.config.identity super(ClientTestBase, cls).setUpClass() def __init__(self, *args, **kwargs): self.parser = tempest.cli.output_parser super(ClientTestBase, self).__init__(*args, **kwargs) def nova(self, action, flags='', params='', admin=True, fail_ok=False): """Executes nova command for the given action.""" return self.cmd_with_auth( 'nova', action, flags, params, admin, fail_ok) def nova_manage(self, action, flags='', params='', fail_ok=False, merge_stderr=False): """Executes nova-manage command for the given action.""" return self.cmd( 'nova-manage', action, flags, params, fail_ok, merge_stderr) def keystone(self, action, flags='', params='', admin=True, fail_ok=False): """Executes keystone command for the given action.""" return self.cmd_with_auth( 'keystone', action, flags, params, admin, fail_ok) def glance(self, action, flags='', params='', admin=True, fail_ok=False): """Executes glance command for the given action.""" return self.cmd_with_auth( 'glance', action, flags, params, admin, fail_ok) def cinder(self, action, flags='', params='', admin=True, fail_ok=False): """Executes cinder command for the given action.""" return self.cmd_with_auth( 'cinder', action, flags, params, admin, fail_ok) def cmd_with_auth(self, cmd, action, flags='', params='', admin=True, fail_ok=False): """Executes given command with auth attributes appended.""" #TODO(jogo) make admin=False work creds = ('--os-username %s --os-tenant-name %s --os-password %s ' '--os-auth-url %s ' % (self.identity.admin_username, self.identity.admin_tenant_name, self.identity.admin_password, self.identity.uri)) flags = creds + ' ' + flags return self.cmd(cmd, action, flags, params, fail_ok) def check_output(self, cmd, **kwargs): # substitutes subprocess.check_output which is not in python2.6 kwargs['stdout'] = subprocess.PIPE proc = subprocess.Popen(cmd, **kwargs) output = proc.communicate()[0] if proc.returncode != 0: raise CommandFailed(proc.returncode, cmd, output) return output def cmd(self, cmd, action, flags='', params='', fail_ok=False, merge_stderr=False): """Executes specified command for the given action.""" cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd), flags, action, params]) LOG.info("running: '%s'" % cmd) cmd = shlex.split(cmd) try: if merge_stderr: result = self.check_output(cmd, stderr=subprocess.STDOUT) else: with open('/dev/null', 'w') as devnull: result = self.check_output(cmd, stderr=devnull) except subprocess.CalledProcessError as e: LOG.error("command output:\n%s" % e.output) raise return result def assertTableStruct(self, items, field_names): """Verify that all items has keys listed in field_names.""" for item in items: for field in field_names: self.assertIn(field, item) class CommandFailed(subprocess.CalledProcessError): # adds output attribute for python2.6 def __init__(self, returncode, cmd, output): super(CommandFailed, self).__init__(returncode, cmd) self.output = output
Python
0.000001
@@ -2959,32 +2959,270 @@ dmin, fail_ok)%0A%0A + def neutron(self, action, flags='', params='', admin=True, fail_ok=False):%0A %22%22%22Executes neutron command for the given action.%22%22%22%0A return self.cmd_with_auth(%0A 'neutron', action, flags, params, admin, fail_ok)%0A%0A def cmd_with
8827f00e5359489aec1bda25c250fc59b02dca43
Add prefix flag and refactor prefix check
ggd/list_files.py
ggd/list_files.py
#------------------------------------------------------------------------------------------------------------- ## Import Statements #------------------------------------------------------------------------------------------------------------- from __future__ import print_function import sys import os import glob import argparse from .utils import conda_root from .utils import get_species from .utils import get_builds from .utils import validate_build from .utils import get_ggd_channels from .utils import get_channeldata_url from .utils import prefix_in_conda from .search import load_json_from_url, search_packages SPECIES_LIST = get_species() #------------------------------------------------------------------------------------------------------------- ## Argument Parser #------------------------------------------------------------------------------------------------------------- def add_list_files(p): c = p.add_parser('list-files', help="List files for an installed ggd recipe", description="Get a list of file(s) for an installed ggd package") c.add_argument("-c", "--channel", default="genomics", choices=get_ggd_channels(), help="The ggd channel of the recipe to find. (Default = genomics)") c.add_argument("-s", "--species", help="(Optional) species recipe is for. Use '*' for any species", choices=SPECIES_LIST) c.add_argument("-g", "--genome-build", help="(Optional) genome build the recipe is for. Use '*' for any genome build.") c.add_argument("-v", "--version", help="(Optional) pattern to match the version of the file desired. Use '*' for any version") c.add_argument("-p", "--pattern", help="(Optional) pattern to match the name of the file desired. To list all files for a ggd package, do not use the -p option") c.add_argument("--prefix", help="(Optional) The full directory path to an conda environment where a ggd recipe is stored. (Only needed if not getting file paths for files in the current conda enviroment)") c.add_argument("name", help="pattern to match recipe name(s)."+ " Ex. `ggd list-files \"hg19-hello*\" -s \"Homo_sapiens\" -g \"hg19\" -p \"out*\"`") c.set_defaults(func=list_files) #------------------------------------------------------------------------------------------------------------- ## Functions/Methods #------------------------------------------------------------------------------------------------------------- def in_ggd_channel(ggd_recipe, ggd_channel): """Method to check if the desired ggd recipe is in the ggd channel in_ggd_channel ============== Method used to identify in the desired pacakge is in the ggd-<channel>. If it is the the species, build, and version is returned. If it is not, then a few alternative package names are provided Parameters: ---------- 1) ggd_recipe: The name of the ggd recipe 2) ggd_channel: The name of the ggd-channel to look in Return: +++++++ 1) species: The species for the ggd-recipe 2) build: The genome build for the ggd-recipe 3) version: The version of the ggd-recipe """ CHANNELDATA_URL = get_channeldata_url(ggd_channel) json_dict = load_json_from_url(CHANNELDATA_URL) package_list = [x[0] for x in search_packages(json_dict, ggd_recipe)] if ggd_recipe in package_list: species = json_dict["packages"][ggd_recipe]["identifiers"]["species"] build = json_dict["packages"][ggd_recipe]["identifiers"]["genome-build"] version = json_dict["packages"][ggd_recipe]["version"] return(species,build,version) else: print("\n\t-> %s is not in the ggd-%s channel" %(ggd_recipe, ggd_channel)) print("\t-> Similar recipes include: \n\t\t- {recipe}".format(recipe="\n\t\t- ".join(package_list[0:5]))) sys.exit(1) def list_files(parser, args): """Main method. Method used to list files for an installed ggd-recipe""" CONDA_ROOT = conda_root() ## Check if prefix paramter is set, and if so check that the prefix is a real conda enviroment if args.prefix: if prefix_in_conda(args.prefix): CONDA_ROOT = args.prefix name = args.name channeldata_species, channeldata_build, channeldata_version = in_ggd_channel(args.name, args.channel) species = args.species if args.species else channeldata_species build = args.genome_build if args.genome_build else channeldata_build if not validate_build(build, species): sys.exit(1) version = args.version if args.version else "*" pattern = args.pattern if args.pattern else "*" path = os.path.join(CONDA_ROOT, "share", "ggd", species, build, name, version, pattern) files = glob.glob(path) if (files): print ("\n".join(files)) else: print("\n\t-> No matching files found for %s" %args.name, file=sys.stderr) sys.exit(1)
Python
0
@@ -1790,16 +1790,30 @@ prefix%22, + default=None, help=%22( @@ -3954,124 +3954,19 @@ T = -conda_root()%0A%0A ## Check if prefix paramter is set, and if so check that the prefix is a real conda enviroment%0A +args.prefix if @@ -3980,20 +3980,20 @@ efix -:%0A if + != None and pre @@ -4021,49 +4021,27 @@ fix) -: %0A CONDA_ROOT = args.prefix%0A + else conda_root()%0A
24b39e457519f45df55a90a6c6c92fa8df5128b7
Disable abc metaclass due to issues with pickling.
sdks/python/apache_beam/utils/urns.py
sdks/python/apache_beam/utils/urns.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """For internal use only; no backwards-compatibility guarantees.""" import abc import inspect from google.protobuf import wrappers_pb2 from apache_beam.internal import pickler from apache_beam.utils import proto_utils PICKLED_WINDOW_FN = "beam:windowfn:pickled_python:v0.1" GLOBAL_WINDOWS_FN = "beam:windowfn:global_windows:v0.1" FIXED_WINDOWS_FN = "beam:windowfn:fixed_windows:v0.1" SLIDING_WINDOWS_FN = "beam:windowfn:sliding_windows:v0.1" SESSION_WINDOWS_FN = "beam:windowfn:session_windows:v0.1" PICKLED_CODER = "beam:coder:pickled_python:v0.1" PICKLED_TRANSFORM = "beam:ptransform:pickled_python:v0.1" FLATTEN_TRANSFORM = "beam:ptransform:flatten:v0.1" READ_TRANSFORM = "beam:ptransform:read:v0.1" WINDOW_INTO_TRANSFORM = "beam:ptransform:window_into:v0.1" PICKLED_SOURCE = "beam:source:pickled_python:v0.1" class RunnerApiFn(object): """Abstract base class that provides urn registration utilities. A class that inherits from this class will get a registration-based from_runner_api and to_runner_api method that convert to and from beam_runner_api_pb2.SdkFunctionSpec. Additionally, register_pickle_urn can be called from the body of a class to register serialization via pickling. """ __metaclass__ = abc.ABCMeta _known_urns = {} @abc.abstractmethod def to_runner_api_parameter(self, unused_context): """Returns the urn and payload for this Fn. The returned urn(s) should be registered with `register_urn`. """ pass @classmethod def register_urn(cls, urn, parameter_type, fn=None): """Registeres a urn with a constructor. For example, if 'beam:fn:foo' had paramter type FooPayload, one could write `RunnerApiFn.register_urn('bean:fn:foo', FooPayload, foo_from_proto)` where foo_from_proto took as arguments a FooPayload and a PipelineContext. This function can also be used as a decorator rather than passing the callable in as the final parameter. A corresponding to_runner_api_parameter method would be expected that returns the tuple ('beam:fn:foo', FooPayload) """ def register(fn): cls._known_urns[urn] = parameter_type, fn return staticmethod(fn) if fn: # Used as a statement. register(fn) else: # Used as a decorator. return register @classmethod def register_pickle_urn(cls, pickle_urn): """Registers and implements the given urn via pickling. """ inspect.currentframe().f_back.f_locals['to_runner_api_parameter'] = ( lambda self, context: ( pickle_urn, wrappers_pb2.BytesValue(value=pickler.dumps(self)))) cls.register_urn( pickle_urn, wrappers_pb2.BytesValue, lambda proto, unused_context: pickler.loads(proto.value)) def to_runner_api(self, context): """Returns an SdkFunctionSpec encoding this Fn. Prefer overriding self.to_runner_api_parameter. """ from apache_beam.portability.api import beam_runner_api_pb2 urn, typed_param = self.to_runner_api_parameter(context) return beam_runner_api_pb2.SdkFunctionSpec( spec=beam_runner_api_pb2.FunctionSpec( urn=urn, parameter=proto_utils.pack_Any(typed_param))) @classmethod def from_runner_api(cls, fn_proto, context): """Converts from an SdkFunctionSpec to a Fn object. Prefer registering a urn with its parameter type and constructor. """ parameter_type, constructor = cls._known_urns[fn_proto.spec.urn] return constructor( proto_utils.unpack_Any(fn_proto.spec.parameter, parameter_type), context)
Python
0
@@ -1999,16 +1999,97 @@ %22%22%22%0A%0A + # TODO(robertwb): Figure out issue with dill + local classes + abc metaclass%0A # __metac
c958fed31b7ab7fedb51c23eee40f781b6e09075
Fix error checking in git mirroring commands
git/mirror/gitmirror.py
git/mirror/gitmirror.py
#!/usr/bin/env python """ Mirror repos between github and bitbucket for safety. Uses a bare checkout and a mirror push. Assumes you have git access via SSH key and that the key passphrase is held in an agent. EXAMPLES Mirror Github neilhwatson/nustuff to Bitbucket neilhwatson/nustuff gitmirror.py --git2bit neilhwatson/nustuff neilhwatson/nustuff Mirror Bitbucket neilhwatson/nustuff to Github neilhwatson/nustuff gitmirror.py --bit2git neilhwatson/nustuff neilhwatson/nustuff AUTHOR Neil H. Watson, http://watson-wilson.ca, neil@watson-wilson.ca LICENSE and COPYRIGHT The MIT License (MIT) Copyright (c) 2017 Neil H Watson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import argparse import distutils.core import os import subprocess import re # # Subs # def get_cli_args(): """Process cli args""" parser = argparse.ArgumentParser( description="Mirror git repos between Github and Bitbucket." , epilog="Use pydoc ./%(prog)s for more detail.") parser.add_argument( '--version' , action='version' , version='%(prog)s 1.0') # Mutually exclusive args, but one or the other is required. group = parser.add_mutually_exclusive_group(required=True) group.add_argument( '--git2bit', '-g2b' , action='store_true' # Means arg.add is True or False. , help='Mirror from Github to Bitbucket.') group.add_argument( '--bit2git', '-b2g' , action='store_true' , help='Mirror from Bitbucket to Github.') parser.add_argument( 'src' , help='Repo path to mirror source.') parser.add_argument( 'dest' , help='Repo path to mirror destination.') arg = parser.parse_args() validate_args(arg) return arg def validate_args(arg): """Validate command line arguments.""" for next_arg in [ arg.src, arg.dest ]: if not re.search('(?x) \A [\w\-/]+ \Z', str(next_arg)): raise TypeError("Invalid repo syntax.") return def runcmd(cmd, workdir): proc = subprocess.Popen(cmd, cwd=workdir, stderr=subprocess.PIPE, shell=True) exit_code = proc.wait() if exit_code != 0: raise proc.stderr return # # Main matter unless this module was called from another program. # def run(): """Start the program when run from CLI""" biturl='git@bitbucket.org:' giturl='git@github.com:' arg = get_cli_args() # Determine src and dest URLs if arg.git2bit: arg.src =giturl+arg.src +'.git' arg.dest=biturl+arg.dest+'.git' if arg.bit2git: arg.src =biturl+arg.src +'.git' arg.dest=giturl+arg.dest+'.git' # Prep work dir workdir="/tmp/gitmirror"+str(os.getpid()) distutils.dir_util.mkpath(workdir) clone= 'git clone --bare ' +arg.src runcmd(clone,workdir) # Get cloned dir clonedir = workdir + '/' + re.search('(?x) ( [^\/]+ \.git)\Z', arg.src).group(1) mirror='git push --mirror ' +arg.dest runcmd(mirror,clonedir) # Clean up distutils.dir_util.remove_tree(workdir) return if __name__ == '__main__': run()
Python
0.000003
@@ -3097,12 +3097,14 @@ ess. -PIPE +STDOUT , sh @@ -3129,16 +3129,21 @@ t_code = + int( proc.wa @@ -3146,16 +3146,18 @@ c.wait() + ) %0A if @@ -3185,16 +3185,27 @@ raise + Exception( proc.st @@ -3208,16 +3208,18 @@ c.stderr + ) %0A%0A re
26ce46c14f3fc5d38253617822974c21b488dd95
Set priority to 0, set viability to permanently null. Add test to ensure keyring can render itself. Ref #358.
keyring/backends/chainer.py
keyring/backends/chainer.py
""" Implementation of a keyring backend chainer. This is specifically not a viable backend, and must be instantiated directly with a list of ordered backends. """ from __future__ import absolute_import from ..backend import KeyringBackend class ChainerBackend(KeyringBackend): def __init__(self, backends): self.backends = list(backends) def get_password(self, service, username): for backend in self.backends: password = backend.get_password(service, username) if password is not None: return password def set_password(self, service, username, password): for backend in self.backends: try: return backend.set_password(service, username, password) except NotImplementedError: pass def delete_password(self, service, username): for backend in self.backends: try: return backend.delete_password(service, username) except NotImplementedError: pass def get_credential(self, service, username): for backend in self.backends: credential = backend.get_credential(service, username) if credential is not None: return credential
Python
0
@@ -271,24 +271,165 @@ ngBackend):%0A + %22%22%22%0A %3E%3E%3E ChainerBackend(())%0A %3Ckeyring.backends.chainer.ChainerBackend object at ...%3E%0A %22%22%22%0A%0A priority = 0%0A viable = False%0A%0A def __in
239ad90ccc3faff0bfaaa5059305a93649978e93
this is duplicated
simplui/container.py
simplui/container.py
# ---------------------------------------------------------------------- # Copyright (c) 2009 Tristam MacDonald # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of DarkCoda nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------- import pyglet from pyglet.gl import * from .widget import Widget from .geometry import Rect class Container(Widget): """Base class for all GUI containers, also usable by itself""" def __init__(self, **kwargs): """Create a container Keyword arguments: name -- unique widget identifier children -- list of child elements to be added to this container """ Widget.__init__(self, **kwargs) self.children = [] children = kwargs.get('children', []) for c in children: self.add(c) def _get_visible(self): return self._visible def _set_visible(self, visible): Widget._set_visible(self, visible) for c in self.children: c.visible = visible visible = property(_get_visible, _set_visible) def update_global_coords(self): Widget.update_global_coords(self) for c in self.children: c.update_global_coords() def update_elements(self): Widget.update_elements(self) for c in self.children: c.update_elements() def update_global_coords(self): Widget.update_global_coords(self) for c in self.children: c.update_global_coords() def update_theme(self, theme): Widget.update_theme(self, theme) for c in self.children: c.update_theme(theme) def update_batch(self, batch, group): Widget.update_batch(self, batch, group) for c in self.children: c.update_batch(batch, group) def update_names(self, oldname=None): Widget.update_names(self, oldname) for c in self.children: c.update_names(oldname) def remove_names(self): Widget.remove_names(self) for c in self.children: c.remove_names() def add(self, child): self.children.append(child) child.parent = self child.update_theme(self.theme) child.update_batch(self._batch, self._group) self.find_root().update_layout() child.update_names() def remove(self, child): child.remove_names() self.children.remove(child) child.parent = None child.update_batch(None, None) self.find_root().update_layout() def on_mouse_press(self, x, y, button, modifiers): Widget.on_mouse_press(self, x, y, button, modifiers) r = self.clip_rect() for c in self.children: if r.intersect(c.bounds()).hit_test(x, y): c.on_mouse_press(x, y, button, modifiers) def on_mouse_drag(self, x, y, dx, dy, button, modifiers): Widget.on_mouse_drag(self, x, y, dx, dy, button, modifiers) for c in self.children: c.on_mouse_drag(x, y, dx, dy, button, modifiers) def on_mouse_release(self, x, y, button, modifiers): Widget.on_mouse_release(self, x, y, button, modifiers) r = self.clip_rect() for c in self.children: if r.intersect(c.bounds()).hit_test(x, y): c.on_mouse_release(x, y, button, modifiers) def on_mouse_scroll(self, x, y, scroll_x, scroll_y): Widget.on_mouse_scroll(self, x, y, scroll_x, scroll_y) r = self.clip_rect() for c in self.children: if r.intersect(c.bounds()).hit_test(x, y): c.on_mouse_scroll(x, y, scroll_x, scroll_y) def on_key_press(self, symbol, modifiers): Widget.on_key_press(self, symbol, modifiers) for c in self.children: c.on_key_press(symbol, modifiers) def on_text(self, text): Widget.on_text(self, text) for c in self.children: c.on_text(text) def on_text_motion(self, motion, select=False): Widget.on_text_motion(self, motion, select) for c in self.children: c.on_text_motion(motion, select) def clip_rect(self): return Rect(self._gx, self._gy, self.w, self.h) class SingleContainer(Container): """Utility base class for containers restricted to a single child""" def __init__(self, **kwargs): if 'children' in kwargs: del kwargs['children'] Container.__init__(self, **kwargs) self._content = None def _get_content(self): return self._content def _set_content(self, content): if self._content: Container.remove(self, self._content) self._content = content if self._content: Container.add(self, self._content) self.find_root().update_layout() content = property(_get_content, _set_content) def add(self, other): raise UserWarning('add to the content element') def remove(self, other): raise UserWarning('remove from the content element') def determine_size(self): if self._content: self._content.determine_size() self._pref_size = self._content._pref_size def reset_size(self, size): Widget.reset_size(self, size) if self._content: self._content.reset_size(size)
Python
0.999434
@@ -2666,136 +2666,8 @@ )%0A%09%0A -%09def update_global_coords(self):%0A%09%09Widget.update_global_coords(self)%0A%09%09%0A%09%09for c in self.children:%0A%09%09%09c.update_global_coords()%0A%09%0A %09def
d6ef946df0497868de9d035ab0d56d0d828c2be1
Disable failing assertion in python test
tests/query_test/test_hdfs_fd_caching.py
tests/query_test/test_hdfs_fd_caching.py
# Copyright 2012 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import pytest from copy import copy from subprocess import call from tests.beeswax.impala_beeswax import ImpalaBeeswaxException from tests.common.impala_test_suite import * from tests.common.test_vector import * from tests.common.impala_cluster import ImpalaCluster from tests.common.test_dimensions import create_exec_option_dimension from tests.common.skip import SkipIfS3 from tests.util.shell_util import exec_process class TestHdfsFdCaching(ImpalaTestSuite): """ This test suite tests the behavior of HDFS file descriptor caching by evaluating the metrics exposed by the Impala daemon. """ NUM_ROWS = 10000 @classmethod def file_format_constraint(cls, v): return v.get_value('table_format').file_format in ["parquet"] @classmethod def add_test_dimensions(cls): super(TestHdfsFdCaching, cls).add_test_dimensions() cls.TestMatrix.add_constraint(cls.file_format_constraint) @classmethod def get_workload(cls): return 'functional-query' def setup_method(self, method): self.cleanup_db("cachefd") self.client.execute("create database cachefd") self.client.execute("create table cachefd.simple(id int, col1 int, col2 int) " "stored as parquet") buf = "insert into cachefd.simple values" self.client.execute(buf + ", ".join(["({0},{0},{0})".format(x) for x in range(self.NUM_ROWS)])) def teardown_method(self, methd): self.cleanup_db("cachedfd") @pytest.mark.execute_serially def test_simple_scan(self, vector): # One table, one file, one handle num_handles_before = self.cached_handles() self.execute_query("select * from cachefd.simple limit 1", vector=vector) num_handles_after = self.cached_handles() # Should have at least one more handle cached and not more than three more assert num_handles_after >= (num_handles_before + 1) assert num_handles_after <= (num_handles_before + 3) # No open handles if scanning is finished assert self.outstanding_handles() == 0 # No change when reading the table again for x in range(10): self.execute_query("select * from cachefd.simple limit 1", vector=vector) assert num_handles_after == self.cached_handles() assert self.outstanding_handles() == 0 def cached_handles(self): return self.get_agg_metric("impala-server.io.mgr.num-cached-file-handles") def outstanding_handles(self): return self.get_agg_metric("impala-server.io.mgr.num-file-handles-outstanding") def get_agg_metric(self, key, fun=sum): cluster = ImpalaCluster() return fun([s.service.get_metric_value(key) for s in cluster.impalads])
Python
0.000001
@@ -2753,24 +2753,263 @@ vector)%0A%0A + # TODO This assertion fails reliably in the Kudu feature branch build for reasons yet%0A # unknown, since it seems unrelated to other changes. Once the root cause for the%0A # failure is known this assertion should be uncommented.%0A # assert num_
56598776ce6588445cf0d76b5faaea507d5d1405
Update Labels for consistency
github3/issues/label.py
github3/issues/label.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from json import dumps from ..decorators import requires_auth from ..models import GitHubCore class Label(GitHubCore): """The :class:`Label <Label>` object. Succintly represents a label that exists in a repository. See also: http://developer.github.com/v3/issues/labels/ """ def _update_attributes(self, label): self._api = self._get_attribute(label, 'url') #: Color of the label, e.g., 626262 self.color = self._get_attribute(label, 'color') #: Name of the label, e.g., 'bug' self.name = self._get_attribute(label, 'name') self._uniq = self._api def _repr(self): return '<Label [{0}]>'.format(self) def __str__(self): return self.name @requires_auth def delete(self): """Delete this label. :returns: bool """ return self._boolean(self._delete(self._api), 204, 404) @requires_auth def update(self, name, color): """Update this label. :param str name: (required), new name of the label :param str color: (required), color code, e.g., 626262, no leading '#' :returns: bool """ json = None if name and color: if color[0] == '#': color = color[1:] json = self._json(self._patch(self._api, data=dumps({ 'name': name, 'color': color})), 200) if json: self._update_attributes(json) return True return False
Python
0
@@ -17,16 +17,62 @@ f-8 -*-%0A +%22%22%22Module containing the logic for labels.%22%22%22%0A from __f @@ -236,52 +236,9 @@ %22%22%22 -The :class:%60Label %3CLabel%3E%60 object. Succintly +A rep @@ -243,17 +243,24 @@ epresent -s +ation of a label @@ -264,25 +264,24 @@ bel -that%0A exists i +object defined o n a @@ -357,309 +357,389 @@ ls/%0A +%0A -%22%22%22%0A def _update_attributes(self, label):%0A self._api = self._get_attribute(label, 'url')%0A #: Color of the +This object has the following attributes::%0A%0A .. attribute:: color%0A%0A The hexadecimeal representation of the background color of this label.%0A%0A .. attribute:: name%0A%0A The name (display label) for this label -, e.g., 626262%0A self.color = self._get_attribute(label, 'color')%0A #: Name of the label, e.g., 'bug'%0A self.name = self._get_attribute( +.%0A %22%22%22%0A%0A def _update_attributes(self, label):%0A self._api = label%5B'url'%5D%0A self.color = label%5B'color'%5D%0A self.name = label -, +%5B 'name' -)%0A +%5D %0A @@ -963,32 +963,118 @@ :returns: +%0A True if successfully deleted, False otherwise%0A :rtype:%0A bool%0A %22%22 @@ -1249,16 +1249,28 @@ tr name: +%0A (requir @@ -1321,16 +1321,28 @@ r color: +%0A (requir @@ -1404,16 +1404,102 @@ returns: +%0A True if successfully updated, False otherwise%0A :rtype:%0A bool%0A
50ad60f45466a3e547174130fa06107f47b1a77b
Make EMPTY actually send an empty string
requestbuilder/__init__.py
requestbuilder/__init__.py
# Copyright (c) 2012, Eucalyptus Systems, Inc. # # Permission to use, copy, modify, and/or distribute this software for # any purpose with or without fee is hereby granted, provided that the # above copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import argparse __version__ = '0.0' class Arg(object): ''' A command line argument. Positional and keyword arguments to __init__ are the same as those to argparse.ArgumentParser.add_argument. The value specified by the 'dest' argument (or the one inferred if none is specified) is used as the name of the parameter to server queries unless send=False is also supplied. ''' def __init__(self, *pargs, **kwargs): if 'route_to' in kwargs: self.route = kwargs.pop('route_to') self.pargs = pargs self.kwargs = kwargs def __eq__(self, other): if isinstance(other, Arg): return sorted(self.pargs) == sorted(other.pargs) return False class MutuallyExclusiveArgList(list): ''' Pass Args as positional arguments to __init__ to create a set of command line arguments that are mutually exclusive. If the first argument passed to __init__ is True then the user must specify exactly one of them. Example: MutuallyExclusiveArgList(Arg('--one'), Arg('--two')) ''' def __init__(self, *args): if len(args) > 0 and isinstance(args[0], bool): self.required = args[0] list.__init__(self, args[1:]) else: self.required = False list.__init__(self, args) class Filter(object): ''' An AWS API filter. For APIs that support filtering by name/value pairs, adding a Filter to a request's list of filters will allow a user to send an output filter to the server with '--filter name=value' at the command line. The value specified by the 'dest' argument (or the 'name' argument, if none is given) is used as the name of a filter in queries. ''' def __init__(self, name, type=str, choices=None, help=None): self.name = name self.type = type self.choices = choices self.help = help def matches_argval(self, argval): return argval.startswith(self.name + '=') def convert(self, argval): ''' Given an argument to --filter of the form "<name>=<value>", convert the value to the appropriate type by calling self.type on it, then return a (name, converted_value) tuple. If the value's type conversion doesn't work then an ArgumentTypeError will result. If the conversion succeeds but does not appear in self.choices when it exists, an ArgumentTypeError will result as well. ''' if '=' not in argval: msg = 'filter {0} must have format "NAME=VALUE"'.format(argval) raise argparse.ArgumentTypeError(msg) (name, value_str) = argval.split('=', 1) try: value = self.type(value_str) except ValueError: msg = 'filter {0} must have type {1}'.format( value_str, self.type.__name__) raise argparse.ArgumentTypeError(msg) if self.choices and value not in self.choices: msg = 'filter value {0} must match one of {1}'.format( value, ', '.join([str(choice) for choice in self.choices])) raise argparse.ArgumentTypeError(msg) return (name, value) class GenericTagFilter(Filter): ''' A filter that accepts "tag:<key>=<value>" values ''' def matches_argval(self, argval): return argval.startswith('tag:') and '=' in argval ########## SINGLETONS ########## # Indicates a parameter that should be sent to the server without a value EMPTY = type('EMPTY', (), {'__repr__': lambda self: "''"})() # Constants (enums?) used for arg routing CONNECTION = type('CONNECTION', (), {'__repr__': lambda self: 'CONNECTION'})() PARAMS = type('PARAMS', (), {'__repr__': lambda self: 'PARAMS'})() # Common args for query authentication STD_AUTH_ARGS = [ Arg('-I', '--access-key-id', dest='aws_access_key_id', metavar='KEY_ID', route_to=CONNECTION), Arg('-S', '--secret-key', dest='aws_secret_access_key', metavar='KEY', route_to=CONNECTION)]
Python
0.000066
@@ -4367,16 +4367,72 @@ lf: %22''%22 +,%0A '__str__': lambda self: '' %7D)()%0A%0A#
9626736dc94c85987472b7d7ad5951363883a5dc
Disable Facter plugin if yaml import fails
JsonStats/FetchStats/Plugins/Facter.py
JsonStats/FetchStats/Plugins/Facter.py
import datetime from JsonStats.FetchStats import Fetcher import os.path class Facter(Fetcher): """ Facter plugin for `jsonstats`. Returns key-value pairs of general system information provided by the `facter` command. Load conditions: * Plugin will load if the `facter` command is found Operating behavior: * Plugin will call `facter` with the `-p` (return `puppet` facts) option if the `puppet` command is on the system. Dependencies: * Facter - http://puppetlabs.com/blog/facter-part-1-facter-101 Optional dependencies: * Puppet - http://puppetlabs.com/puppet/what-is-puppet """ import yaml def __init__(self): self.context = 'facter' self._cmd = 'facter --yaml 2>/dev/null' if os.path.exists('/usr/bin/puppet'): self._cmd = 'facter -p --yaml 2>/dev/null' self._load_data() def _load_data(self): self._refresh_time = datetime.datetime.utcnow() try: output = self._exec(self._cmd) self.facts = self.yaml.load(output) self._loaded(True) except OSError, e: # Couldn't find facter command, most likely self._loaded(False, msg=str(e)) except Exception, e: # Something else did indeed go wrong self._loaded(False, msg=str(e)) def dump(self): # poor mans cache, refresh cache in an hour if (datetime.datetime.utcnow() - datetime.timedelta(minutes=5)) > self._refresh_time: self._load_data() return self.facts def dump_json(self): return self.json.dumps(self.dump())
Python
0
@@ -538,16 +538,61 @@ cter-101 +%0A * PyYAML - http://pyyaml.org/wiki/PyYAML %0A%0A Op @@ -688,19 +688,76 @@ -import yaml +try:%0A import yaml%0A except ImportError:%0A yaml = None %0A%0A @@ -859,16 +859,126 @@ /null'%0A%0A + if self.yaml is None:%0A self._loaded(False, msg='No module named yaml')%0A return%0A%0A
4f4216b56bef952fda37fb2ef35765bb53cba54a
Result.__repr__ should indicate properties in proper case
couchbase/_bootstrap.py
couchbase/_bootstrap.py
# # Copyright 2013, Couchbase, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the core functionality of '_libcouchbase'. In short, this contains the convergence between the C module and code written in Python. While the _libcouchbase module should never be used directly, in the off chance that this does happen, ensure this module is loaded as well before anything is done, otherwise Bad Things May Happen. Additionally, this module contains python functions used exclusively from C. They are here because it was quicker to write them in Python than it was in C. Do not touch this file at all. You have been warned """ import json import pickle import couchbase.exceptions as E import couchbase._libcouchbase as C from couchbase.items import ItemCollection, ItemOptionDict, ItemSequence def _result__repr__(self): """ This is used as the `__repr__` function for the :class:`Result` """ details = [] flags = self.__class__._fldprops rcstr = "RC=0x{0:X}".format(self.rc) if self.rc != 0: rcstr += "[{0}]".format(self.errstr) details.append(rcstr) if flags & C.PYCBC_RESFLD_KEY and hasattr(self, 'key'): details.append("Key={0}".format(repr(self.key))) if flags & C.PYCBC_RESFLD_VALUE and hasattr(self, 'value'): details.append("Value={0}".format(repr(self.value))) if flags & C.PYCBC_RESFLD_CAS and hasattr(self, 'cas'): details.append("CAS=0x{cas:x}".format(cas=self.cas)) if flags & C.PYCBC_RESFLD_CAS and hasattr(self, 'flags'): details.append("Flags=0x{flags:x}".format(flags=self.flags)) if flags & C.PYCBC_RESFLD_HTCODE and hasattr(self, "http_status"): details.append("HTTP={0}".format(self.http_status)) if flags & C.PYCBC_RESFLD_URL and hasattr(self, "url"): details.append("URL={0}".format(self.url)) ret = "{0}<{1}>".format(self.__class__.__name__, ', '.join(details)) return ret def _observeinfo__repr__(self): constants = ('OBS_PERSISTED', 'OBS_FOUND', 'OBS_NOTFOUND', 'OBS_LOGICALLY_DELETED') flag_str = '' for c in constants: if self.flags == getattr(C, c): flag_str = c break fmstr = ("{cls}<Status=[{status_s} (0x{flags:X})], " "Master={is_master}, " "CAS=0x{cas:X}>") ret = fmstr.format(cls=self.__class__.__name__, status_s=flag_str, flags=self.flags, is_master=bool(self.from_master), cas=self.cas) return ret def _json_encode_wrapper(*args): return json.dumps(*args, ensure_ascii=False, separators=(',', ':')) class FMT_AUTO_object_not_a_number(object): pass # TODO: Make this more readable and have PEP8 ignore it. _FMT_AUTO = FMT_AUTO_object_not_a_number() MAX_URI_LENGTH = 2048 def _view_path_helper(options): # Assume options are already encoded! if not options: return '', '' post_body = '' encoded = options.encoded if len(encoded) > MAX_URI_LENGTH: encoded, post_body = options._long_query_encoded return encoded, post_body def run_init(m): m._init_helpers(result_reprfunc=_result__repr__, fmt_utf8_flags=C.FMT_UTF8, fmt_bytes_flags=C.FMT_BYTES, fmt_json_flags=C.FMT_JSON, fmt_pickle_flags=C.FMT_PICKLE, pickle_encode=pickle.dumps, pickle_decode=pickle.loads, json_encode=_json_encode_wrapper, json_decode=json.loads, lcb_errno_map=E._LCB_ERRNO_MAP, misc_errno_map=E._EXCTYPE_MAP, default_exception=E.CouchbaseError, obsinfo_reprfunc=_observeinfo__repr__, itmcoll_base_type=ItemCollection, itmopts_dict_type=ItemOptionDict, itmopts_seq_type=ItemSequence, fmt_auto=_FMT_AUTO, view_path_helper=_view_path_helper) run_init(C) C.FMT_AUTO = _FMT_AUTO
Python
0.999998
@@ -1523,10 +1523,10 @@ = %22 -RC +rc =0x%7B @@ -1725,17 +1725,17 @@ append(%22 -K +k ey=%7B0%7D%22. @@ -1847,17 +1847,17 @@ append(%22 -V +v alue=%7B0%7D @@ -1969,19 +1969,19 @@ append(%22 -CAS +cas =0x%7Bcas: @@ -2093,17 +2093,17 @@ append(%22 -F +f lags=0x%7B @@ -2238,12 +2238,19 @@ nd(%22 -HTTP +http_status =%7B0%7D @@ -2362,19 +2362,19 @@ append(%22 -URL +url =%7B0%7D%22.fo
9a4b3dbaba3f32a27f4cc5eaf74ef9b44a051472
Fix some formatting
tests/integration/cloud/providers/test_vmware.py
tests/integration/cloud/providers/test_vmware.py
# -*- coding: utf-8 -*- ''' :codeauthor: :email:`Megan Wilhite <mwilhite@saltstack.com>` ''' # Import Python Libs from __future__ import absolute_import import os import random import string # Import Salt Libs from salt.config import cloud_providers_config, cloud_config # Import Salt Testing LIbs from tests.support.case import ShellCase from tests.support.paths import FILES from tests.support.helpers import expensiveTest from salt.ext.six.moves import range def __random_name(size=6): ''' Generates a radom cloud instance name ''' return 'CLOUD-TEST-' + ''.join( random.choice(string.ascii_uppercase + string.digits) for x in range(size) ) # Create the cloud instance name to be used throughout the tests INSTANCE_NAME = __random_name() PROVIDER_NAME = 'vmware' TIMEOUT = 500 class VMWareTest(ShellCase): ''' Integration tests for the vmware cloud provider in Salt-Cloud ''' @expensiveTest def setUp(self): ''' Sets up the test requirements ''' # check if appropriate cloud provider and profile files are present profile_str = 'vmware-config' providers = self.run_cloud('--list-providers') if profile_str + ':' not in providers: self.skipTest( 'Configuration file for {0} was not found. Check {0}.conf files ' 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' .format(PROVIDER_NAME) ) # check if user, password, url and provider are present config = cloud_providers_config( os.path.join( FILES, 'conf', 'cloud.providers.d', PROVIDER_NAME + '.conf' ) ) user = config[profile_str][PROVIDER_NAME]['user'] password = config[profile_str][PROVIDER_NAME]['password'] url = config[profile_str][PROVIDER_NAME]['url'] conf_items = [user, password, url] missing_conf_item = [] for item in conf_items: if item == '': missing_conf_item.append(item) if missing_conf_item: self.skipTest( 'A user, password, and url must be provided to run these tests.' 'One or more of these elements is missing. Check' 'tests/integration/files/conf/cloud.providers.d/{0}.conf' .format(PROVIDER_NAME) ) def test_instance(self): ''' Tests creating and deleting an instance on vmware and installing salt ''' # create the instance profile = os.path.join( FILES, 'conf', 'cloud.profiles.d', PROVIDER_NAME + '.conf' ) profile_config = cloud_config(profile) disk_datastore = profile_config['vmware-test']['devices']['disk']['Hard disk 2']['datastore'] instance = self.run_cloud('-p vmware-test {0}'.format(INSTANCE_NAME), timeout=TIMEOUT) ret_str = '{0}:'.format(INSTANCE_NAME) disk_datastore_str = ' [{0}] {1}/Hard disk 2-flat.vmdk'.format(disk_datastore, INSTANCE_NAME) # check if instance returned with salt installed try: self.assertIn(ret_str, instance) self.assertIn(disk_datastore_str, instance, msg='Hard Disk 2 did not use the Datastore {0} '.format(disk_datastore)) except AssertionError: self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) raise # delete the instance delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) ret_str = '{0}:\', \' True'.format(INSTANCE_NAME) # check if deletion was performed appropriately self.assertIn(ret_str, str(delete)) def test_snapshot(self): ''' Tests creating snapshot and creating vm with --no-deploy ''' # create the instance instance = self.run_cloud('-p vmware-test {0} --no-deploy'.format(INSTANCE_NAME), timeout=TIMEOUT) ret_str = '{0}:'.format(INSTANCE_NAME) # check if instance returned with salt installed try: self.assertIn(ret_str, instance) except AssertionError: self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) raise create_snapshot = self.run_cloud('-a create_snapshot {0} \ snapshot_name=\'Test Cloud\' \ memdump=True -y'.format(INSTANCE_NAME), timeout=TIMEOUT) s_ret_str = 'Snapshot created successfully' self.assertIn(s_ret_str, str(create_snapshot)) # delete the instance delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) ret_str = '{0}:\', \' True'.format(INSTANCE_NAME) self.assertIn(ret_str, str(delete)) def tearDown(self): ''' Clean up after tests ''' query = self.run_cloud('--query') ret_str = ' {0}:'.format(INSTANCE_NAME) # if test instance is still present, delete it if ret_str in query: self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT)
Python
0.999998
@@ -271,17 +271,16 @@ config%0A%0A -%0A # Import @@ -3383,16 +3383,42 @@ nstance, +%0A msg='Ha
764f785bfa34d99dc2633db78a0d80407e401993
Implement a client instance
zuora/client.py
zuora/client.py
""" Client for Zuora SOAP API """ # TODO: # - Handle debug # - Handle error # - Session policy from suds.client import Client from suds.sax.element import Element class ZuoraException(Exception): """ Base Zuora Exception. """ pass class Zuora(object): """ SOAP Client based on Suds """ def __init__(self, wsdl, login, password): self.wsdl = wsdl self.login = login self.password = password
Python
0.000426
@@ -160,16 +160,72 @@ lement%0A%0A +from zuora.transport import HttpTransportWithKeepAlive%0A%0A %0Aclass Z @@ -503,8 +503,275 @@ assword%0A +%0A self.session = None%0A self.wsdl_path = 'file://%25s' %25 os.path.abspath(self.wsdl)%0A%0A self.client = Client(%0A self.wsdl_path,%0A transport=HttpTransportWithKeepAlive())%0A%0A def __str__(self):%0A return self.client.__str__()%0A
88206513d4a04a99832ac8461a3209b2d1d7d2c8
make test work
tests/test_quality/test_restoringbeam.py
tests/test_quality/test_restoringbeam.py
import os import unittest2 as unittest from tkp.quality.restoringbeam import beam_invalid from tkp.testutil.decorators import requires_data from tkp import accessors from tkp.testutil.data import DATAPATH fits_file = os.path.join(DATAPATH, 'quality/noise/bad/home-pcarrol-msss-3C196a-analysis-band6.corr.fits') @requires_data(fits_file) class TestRestoringBeam(unittest.TestCase): def test_header(self): image = accessors.open(fits_file) (semimaj, semimin, theta) = image.beam self.assertFalse(beam_invalid(semimaj, semimin)) # TODO: this is for FOV calculation and checking #data = tkp.quality.restoringbeam.parse_fits(image) #frequency = image.freq_eff #wavelength = scipy.constants.c/frequency #d = 32.25 #fwhm = tkp.lofar.beam.fwhm(wavelength, d) #fov = tkp.lofar.beam.fov(fwhm) def test_infinite(self): smaj, smin, theta = float('inf'), float('inf'), float('inf') self.assertTrue(beam_invalid(smaj, smin, theta)) if __name__ == '__main__': unittest.main()
Python
0.000195
@@ -553,16 +553,23 @@ semimin +, theta ))%0A%0A
716ffae543838af6de7b83723ac6048a9f8f390a
improve test list latest articles
knowledge/tests/tests_views.py
knowledge/tests/tests_views.py
from __future__ import unicode_literals from model_mommy import mommy from knowledge.base import choices from knowledge.base.test import ViewTestCase from knowledge.models import Article class HomepageTestCase(ViewTestCase): from knowledge.views import Homepage view_class = Homepage view_name = 'knowledge:homepage' def setUp(self): self.category = mommy.make_recipe('knowledge.tests.category_with_articles') mommy.make_recipe('knowledge.tests.category_without_articles') for article in Article.objects.published(): article.votes.add(token=article.id, rate=choices.VoteChoice.Upvote) def test_category_list(self): response = self.get() self.assertHttpOK(response) self.assertSeqEqual(response.context_data['category_list'], [self.category]) def test_have_a_search_form_on_context(self): from knowledge.forms import SimpleSearchForm response = self.get() self.assertEqual(response.context_data['search_form'], SimpleSearchForm) def test_count_published_articles(self): response = self.get() category_list = response.context_data['category_list'] self.assertHttpOK(response) self.assertEqual(category_list[0].get_articles_count(), 1) def test_list_latest_published_articles(self): response = self.get() self.assertHttpOK(response) self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published()) def test_list_top_viewed_published_articles(self): response = self.get() self.assertHttpOK(response) self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published()) def test_list_top_rated_published_articles(self): response = self.get() self.assertHttpOK(response) self.assertSeqEqual(response.context_data['top_viewed_articles'], Article.objects.published())
Python
0.000001
@@ -1060,23 +1060,19 @@ est_ -count_published +list_latest _art @@ -1096,248 +1096,184 @@ -response = self.get()%0A category_list = response.context_data%5B'category_list'%5D%0A%0A self.assertHttpOK(response)%0A self.assertEqual(category_list%5B0%5D.get_articles_count(), 1)%0A%0A def test_list_latest_published_articles(self): +articles = mommy.make_recipe('knowledge.tests.published_article',%0A category=self.category,%0A _quantity=5) %0A @@ -1328,32 +1328,85 @@ ttpOK(response)%0A + self.assertEqual(Article.objects.count(), 7)%0A self.ass @@ -1460,35 +1460,16 @@ '%5D, -A +a rticle -.objects.published() +s )%0A%0A @@ -1496,26 +1496,16 @@ _viewed_ -published_ articles @@ -1704,26 +1704,16 @@ p_rated_ -published_ articles
f739b25c199fe44a6fd672ac8a01a3fc16f72e82
Add test that expects fatal error when passing `filter` with too many `.`
test/test_run_script.py
test/test_run_script.py
import subprocess import pytest def test_filter(tmp_path): unit_test = tmp_path.joinpath('some_unit_test.sv') unit_test.write_text(''' module some_unit_test; import svunit_pkg::*; `include "svunit_defines.svh" string name = "some_ut"; svunit_testcase svunit_ut; function void build(); svunit_ut = new(name); endfunction task setup(); svunit_ut.setup(); endtask task teardown(); svunit_ut.teardown(); endtask `SVUNIT_TESTS_BEGIN `SVTEST(some_failing_test) `FAIL_IF(1) `SVTEST_END `SVTEST(some_passing_test) `FAIL_IF(0) `SVTEST_END `SVUNIT_TESTS_END endmodule ''') log = tmp_path.joinpath('run.log') print('Filtering only the passing test should block the fail') subprocess.check_call(['runSVUnit', '--filter', 'some_ut.some_passing_test'], cwd=tmp_path) assert 'FAILED' not in log.read_text() print('No explicit filter should cause both tests to run, hence trigger the fail') subprocess.check_call(['runSVUnit'], cwd=tmp_path) assert 'FAILED' in log.read_text() def test_filter_wildcards(tmp_path): failing_unit_test = tmp_path.joinpath('some_failing_unit_test.sv') failing_unit_test.write_text(''' module some_failing_unit_test; import svunit_pkg::*; `include "svunit_defines.svh" string name = "some_failing_ut"; svunit_testcase svunit_ut; function void build(); svunit_ut = new(name); endfunction task setup(); svunit_ut.setup(); endtask task teardown(); svunit_ut.teardown(); endtask `SVUNIT_TESTS_BEGIN `SVTEST(some_test) `FAIL_IF(1) `SVTEST_END `SVUNIT_TESTS_END endmodule ''') passing_unit_test = tmp_path.joinpath('some_passing_unit_test.sv') passing_unit_test.write_text(''' module some_passing_unit_test; import svunit_pkg::*; `include "svunit_defines.svh" string name = "some_passing_ut"; svunit_testcase svunit_ut; function void build(); svunit_ut = new(name); endfunction task setup(); svunit_ut.setup(); endtask task teardown(); svunit_ut.teardown(); endtask `SVUNIT_TESTS_BEGIN `SVTEST(some_test) `FAIL_IF(0) `SVTEST_END `SVUNIT_TESTS_END endmodule ''') log = tmp_path.joinpath('run.log') print('Filtering only the passing testcase should block the fail') subprocess.check_call(['runSVUnit', '--filter', 'some_passing_ut.*'], cwd=tmp_path) assert 'FAILED' not in log.read_text() assert 'some_test' in log.read_text() print('Filtering only for the test should cause both tests to run, hence trigger the fail') subprocess.check_call(['runSVUnit', '--filter', "*.some_test"], cwd=tmp_path) assert 'FAILED' in log.read_text() def test_filter_without_dot(tmp_path): dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv') dummy_unit_test.write_text(''' module dummy_unit_test; import svunit_pkg::*; `include "svunit_defines.svh" string name = "some_passing_ut"; svunit_testcase svunit_ut; function void build(); svunit_ut = new(name); endfunction task setup(); svunit_ut.setup(); endtask task teardown(); svunit_ut.teardown(); endtask `SVUNIT_TESTS_BEGIN `SVUNIT_TESTS_END endmodule ''') subprocess.check_call(['runSVUnit', '--filter', 'some_string'], cwd=tmp_path) log = tmp_path.joinpath('run.log') assert 'fatal' in log.read_text()
Python
0
@@ -3390,28 +3390,708 @@ 'fatal' in log.read_text()%0A +%0A%0A%0Adef test_filter_with_extra_dot(tmp_path):%0A dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')%0A dummy_unit_test.write_text('''%0Amodule dummy_unit_test;%0A%0A import svunit_pkg::*;%0A %60include %22svunit_defines.svh%22%0A%0A string name = %22some_passing_ut%22;%0A svunit_testcase svunit_ut;%0A%0A function void build();%0A svunit_ut = new(name);%0A endfunction%0A%0A task setup();%0A svunit_ut.setup();%0A endtask%0A%0A task teardown();%0A svunit_ut.teardown();%0A endtask%0A%0A %60SVUNIT_TESTS_BEGIN%0A %60SVUNIT_TESTS_END%0A%0Aendmodule%0A ''')%0A%0A subprocess.check_call(%5B'runSVUnit', '--filter', 'a.b.c'%5D, cwd=tmp_path)%0A%0A log = tmp_path.joinpath('run.log')%0A assert 'fatal' in log.read_text()%0A%0A
faa912ed8d2cb68b2f8661ed3550745967f58ba1
fix broken config path
test/test_transports.py
test/test_transports.py
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import datetime import ssl from datetime import date import pytest from wampy.peers.clients import Client from wampy.peers.routers import Crossbar from wampy.roles.callee import callee from wampy.testing.helpers import wait_for_session, wait_for_registrations class DateService(Client): @callee def get_todays_date(self): return datetime.date.today().isoformat() class TestIP4(object): @pytest.fixture(scope="function") def config_path(self): return './wampy/testing/configs/crossbar.config.ipv4.json' def test_ipv4_websocket_connection(self, config_path, router): with router: service = DateService(router=router) with service: wait_for_registrations(service, 1) client = Client(router=router) with client: result = client.rpc.get_todays_date() today = date.today() assert result == today.isoformat() class TestIP6(object): @pytest.fixture(scope="function") def config_path(self): return './wampy/testing/configs/crossbar.ipv6.json' def test_ipv6_websocket_connection(self, config_path, router): with router: service = DateService(router=router) with service: wait_for_registrations(service, 1) client = Client(router=router) with client: result = client.rpc.get_todays_date() today = date.today() assert result == today.isoformat() def test_ipv4_secure_websocket_connection(): try: ssl.PROTOCOL_TLSv1_2 except AttributeError: pytest.skip('Python Environment does not support TLS') # note that TLS not supported by crossbar on ipv6 crossbar = Crossbar( config_path='./wampy/testing/configs/crossbar.tls.json', crossbar_directory='./', ) with crossbar as router: with DateService(router=router, use_tls=True) as service: wait_for_registrations(service, 1) client = Client(router=router, use_tls=True) with client: wait_for_session(client) result = client.rpc.get_todays_date() today = date.today() assert result == today.isoformat()
Python
0.000004
@@ -721,20 +721,8 @@ bar. -config.ipv4. json
ea3e4dea51d4daa7381a19b65553f2526d0b4760
correct docstring reference.
balloon_learning_environment/utils/sampling_test.py
balloon_learning_environment/utils/sampling_test.py
# coding=utf-8 # Copyright 2021 The Balloon Learning Environment Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for balloon_learning_environment.google.sampling.""" from absl.testing import absltest from absl.testing import parameterized from balloon_learning_environment.env.balloon import standard_atmosphere from balloon_learning_environment.utils import sampling from balloon_learning_environment.utils import units import jax class SamplingTest(parameterized.TestCase): def setUp(self): super().setUp() # Deterministic PRNG state. Tests MUST NOT rely on a specific seed. self.prng_key = jax.random.PRNGKey(123) self.atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0)) def test_sample_location_with_seed_gives_deterministic_lat_lng(self): location1 = sampling.sample_location(self.prng_key) location2 = sampling.sample_location(self.prng_key) self.assertEqual(location1, location2) def test_sample_location_gives_valid_lat_lng(self): latlng = sampling.sample_location(self.prng_key) # We only allow locations near the equator self.assertBetween(latlng.lat.degrees, -10.0, 10.0) # We don't allow locations near the international date line self.assertBetween(latlng.lng.degrees, -175.0, 175.0) def test_sample_time_with_seed_gives_deterministic_time(self): t1 = sampling.sample_time(self.prng_key) t2 = sampling.sample_time(self.prng_key) self.assertEqual(t1, t2) def test_sample_time_gives_time_within_range(self): # Pick a 1 hour segment to give a small valid range for testing begin_range = units.datetime(year=2020, month=1, day=1, hour=1) end_range = units.datetime(year=2020, month=1, day=1, hour=2) t = sampling.sample_time( self.prng_key, begin_range=begin_range, end_range=end_range) self.assertBetween(t, begin_range, end_range) def test_sample_pressure_with_seed_gives_deterministic_pressure(self): p1 = sampling.sample_pressure(self.prng_key, self.atmosphere) p2 = sampling.sample_pressure(self.prng_key, self.atmosphere) self.assertEqual(p1, p2) def test_sample_pressure_gives_pressure_within_range(self): p = sampling.sample_pressure(self.prng_key, self.atmosphere) self.assertBetween(p, 5000, 14000) def test_sample_upwelling_infrared_is_within_range(self): ir = sampling.sample_upwelling_infrared(self.prng_key) self.assertBetween(ir, 100.0, 350.0) @parameterized.named_parameters( dict(testcase_name='logit_normal', distribution_type='logit_normal'), dict(testcase_name='inverse_lognormal', distribution_type='inverse_lognormal')) def test_sample_upwelling_infrared_is_within_range_nondefault( self, distribution_type): ir = sampling.sample_upwelling_infrared(self.prng_key, distribution_type=distribution_type) self.assertBetween(ir, 100.0, 350.0) def test_sample_upwelling_infrared_invalid_distribution_type(self): with self.assertRaises(ValueError): sampling.sample_upwelling_infrared(self.prng_key, distribution_type='invalid') if __name__ == '__main__': absltest.main()
Python
0.001089
@@ -660,14 +660,13 @@ ent. -google +utils .sam
1112495ae59542ad76d1cc72f40ab91e7e562f1c
Simplify mac_epoch_diff
Lib/fontTools/ttLib/tables/_h_e_a_d.py
Lib/fontTools/ttLib/tables/_h_e_a_d.py
from __future__ import print_function, division from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num from . import DefaultTable import time import calendar headFormat = """ > # big endian tableVersion: 16.16F fontRevision: 16.16F checkSumAdjustment: I magicNumber: I flags: H unitsPerEm: H created: Q modified: Q xMin: h yMin: h xMax: h yMax: h macStyle: H lowestRecPPEM: H fontDirectionHint: h indexToLocFormat: h glyphDataFormat: h """ class table__h_e_a_d(DefaultTable.DefaultTable): dependencies = ['maxp', 'loca'] def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(headFormat, data, self) if rest: # this is quite illegal, but there seem to be fonts out there that do this assert rest == "\0\0" def compile(self, ttFont): self.modified = int(time.time() - mac_epoch_diff) data = sstruct.pack(headFormat, self) return data def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() formatstring, names, fixes = sstruct.getformat(headFormat) for name in names: value = getattr(self, name) if name in ("created", "modified"): try: value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff))) except ValueError: value = time.asctime(time.gmtime(0)) if name in ("magicNumber", "checkSumAdjustment"): if value < 0: value = value + 0x100000000 value = hex(value) if value[-1:] == "L": value = value[:-1] elif name in ("macStyle", "flags"): value = num2binary(value, 16) writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name in ("created", "modified"): value = calendar.timegm(time.strptime(value)) - mac_epoch_diff elif name in ("macStyle", "flags"): value = binary2num(value) else: value = safeEval(value) setattr(self, name, value) def calc_mac_epoch_diff(): """calculate the difference between the original Mac epoch (1904) to the epoch on this machine. """ safe_epoch_t = (1972, 1, 1, 0, 0, 0, 0, 0, 0) safe_epoch = time.mktime(safe_epoch_t) - time.timezone # This assert fails in certain time zones, with certain daylight settings #assert time.gmtime(safe_epoch)[:6] == safe_epoch_t[:6] seconds1904to1972 = 60 * 60 * 24 * (365 * (1972-1904) + 17) # thanks, Laurence! return int(safe_epoch - seconds1904to1972) mac_epoch_diff = calc_mac_epoch_diff() _months = [' ', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] _weekdays = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
Python
0.999998
@@ -2177,54 +2177,11 @@ )%0A%0A%0A -def calc_mac_epoch_diff():%0A%09%22%22%22calculate the d +# D iffe @@ -2223,18 +2223,17 @@ h (1904) -%0A%09 + to the e @@ -2258,411 +2258,71 @@ ne.%0A -%09%22%22%22%0A%09safe_epoch_t = (1972, 1, 1, 0, 0, 0, 0, 0, 0)%0A%09safe_epoch = time.mktime(safe_epoch_t) - time.timezone%0A%09# This assert fails in certain time zones, with certain daylight settings%0A%09#assert +mac_epoch_diff = calendar. time -. gm -time(safe_epoch)%5B:6%5D == safe_epoch_t%5B:6%5D%0A%09seconds1904to1972 = 60 * 60 * 24 * (365 * (1972-1904) + 17) # thanks, Laurence!%0A%09return int(safe_epoch - seconds1904to1972)%0A%0Amac_epoch_diff = calc_mac_epoch_diff( +((1904, 1, 1, 0, 0, 0, 0, 0, 0) )%0A%0A%0A
c69ac39ee650445533d31a4a476f6f3b14cb43ca
Update roles.py
site/models/roles.py
site/models/roles.py
import datetime, re; from sqlalchemy.orm import validates; from server import DB, FlaskServer; class Roles(DB.Model): id = DB.Column(DB.Integer, primary_key=True, autoincrement=True); name = DB.Column(DB.String(20); district_id = DB.relationship(DB.Integer, DB.ForeignKey('district.id')); created_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id')); created_at = DB.Column(DB.DateTime); updated_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id'), nullable=True); updated_at = DB.Column(DB.DateTime, nullable=True); def __init__(self, name, created_at, updated_at): self.name = name; self.created_at = datetime.datetime.now(); self.updated_at = self.created_at;
Python
0.000001
@@ -473,31 +473,16 @@ ers.id') -, nullable=True );%0D%0A%09upd
a39dcc1548bea483225635292c5e1f489a3288a2
support direct line navigation shorthand (submission from RUBY-12579 improved) #RUBY-12579 fixed
platform/platform-resources/src/launcher.py
platform/platform-resources/src/launcher.py
#!/usr/bin/python import socket import struct import sys import os import os.path import time # see com.intelij.idea.SocketLock for the server side of this interface RUN_PATH = '$RUN_PATH$' CONFIG_PATH = '$CONFIG_PATH$' args = [] skip_next = False for arg in sys.argv[1:]: if arg == '-l' or arg == '--line': args.append(arg) skip_next = True elif skip_next: args.append(arg) skip_next = False else: args.append(os.path.abspath(arg)) def launch_with_port(port): found = False s = socket.socket() s.settimeout(0.3) try: s.connect(('127.0.0.1', port)) except: return False while True: try: path_len = struct.unpack(">h", s.recv(2))[0] path = s.recv(path_len) path = os.path.abspath(path) if os.path.abspath(path) == os.path.abspath(CONFIG_PATH): found = True break except: break if found: if args: cmd = "activate " + "\0".join(args) encoded = struct.pack(">h", len(cmd)) + cmd s.send(encoded) time.sleep(0.5) # don't close socket immediately return True return False port = -1 try: f = open(os.path.join(CONFIG_PATH, 'port')) port = int(f.read()) except Exception: type, value, traceback = sys.exc_info() print(value) port = -1 if port == -1: # SocketLock actually allows up to 50 ports, but the checking takes too long for port in range(6942, 6942+10): if launch_with_port(port): exit() else: if launch_with_port(port): exit() if sys.platform == "darwin": # Mac OS: RUN_PATH is *.app path if len(args): args.insert(0, "--args") os.execvp("open", ["-a", RUN_PATH] + args) else: # unix common bin_dir, bin_file = os.path.split(RUN_PATH) os.chdir(bin_dir) os.execv(bin_file, [bin_file] + args)
Python
0
@@ -289,162 +289,682 @@ = '- -l' or arg == '--line':%0A args.append(arg)%0A skip_next = True%0A elif skip_next:%0A args.append(arg)%0A skip_next = False%0A else:%0A +h' or arg == '-?' or arg == '--help':%0A print 'Usage: ' + sys.argv%5B0%5D + ' %5B-h%7C-?%7C--help%5D %5B-l%7C--line %3Cline_number%3E%5D %5Bfile_path%5B:%3Cline_number%3E%5D%5D'%0A exit( 0 )%0A elif arg == '-l' or arg == '--line':%0A args.append(arg)%0A skip_next = True%0A elif skip_next:%0A args.append(arg)%0A skip_next = False%0A else:%0A if ':' in arg:%0A filepath, line_number = arg.rsplit( ':', 1 )%0A if line_number.isdigit():%0A args.append( '-l' )%0A args.append( line_number )%0A args.append( os.path.abspath( filepath ) )%0A else:%0A args.append(os.path.abspath(arg))%0A else:%0A
7d81f6000e9b29059eafdc7f82598ea78c048119
Fix linter
tests/unit/modules/test_libcloud_loadbalancer.py
tests/unit/modules/test_libcloud_loadbalancer.py
# -*- coding: utf-8 -*- ''' :codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>` ''' # Import Python Libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import ( patch, MagicMock, NO_MOCK, NO_MOCK_REASON ) import salt.modules.libcloud_loadbalancer as libcloud_loadbalancer try: from libcloud.loadbalancer.base import BaseDriver, LoadBalancer, Algorithm, Member HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False MockLBDriver = object if HASLIBCLOUD: class MockLBDriver(BaseDriver): def __init__(self): self._TEST_BALANCER = LoadBalancer( id='test_id', name='test_balancer', state=0, # RUNNING ip='1.2.3.4', port=80, driver=self, extra={}) self._TEST_MEMBER = Member( id='member_id', ip='12.3.4.5', port=443, balancer=self._TEST_BALANCER, extra=None) def get_balancer(self, balancer_id): assert balancer_id == 'test_id' return self._TEST_BALANCER def list_balancers(self): return [self._TEST_BALANCER] def list_protocols(self): return ['http', 'https'] def create_balancer(self, name, port, protocol, algorithm, members): assert name == 'new_test_balancer' assert port == 80 assert protocol == 'http' assert isinstance(algorithm, (Algorithm, int)) assert isinstance(members, list) return self._TEST_BALANCER def destroy_balancer(self, balancer): assert balancer == self._TEST_BALANCER return True def balancer_attach_member(self, balancer, member): assert isinstance(balancer, LoadBalancer) assert isinstance(member, Member) assert member.id is None assert balancer.id == 'test_id' return self._TEST_MEMBER def balancer_detach_member(self, balancer, member): assert isinstance(balancer, LoadBalancer) assert isinstance(member, Member) assert member.id == 'member_id' assert balancer.id == 'test_id' return True def balancer_list_members(self, balancer): assert isinstance(balancer, LoadBalancer) assert balancer.id == 'test_id' return [self._TEST_MEMBER] def get_mock_driver(): return MockLBDriver() @skipIf(not HAS_LIBCLOUD, NO_MOCK_REASON) @skipIf(NO_MOCK, NO_MOCK_REASON) @patch('salt.modules.libcloud_loadbalancer._get_driver', MagicMock(return_value=MockLBDriver())) class LibcloudLoadBalancerModuleTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): module_globals = { '__salt__': { 'config.option': MagicMock(return_value={ 'test': { 'driver': 'test', 'key': '2orgk34kgk34g' } }) } } if libcloud_loadbalancer.HAS_LIBCLOUD is False: module_globals['sys.modules'] = {'libcloud': MagicMock()} return {libcloud_loadbalancer: module_globals} def test_module_creation(self): client = libcloud_loadbalancer._get_driver('test') self.assertFalse(client is None) def test_init(self): with patch('salt.utils.compat.pack_dunder', return_value=False) as dunder: libcloud_loadbalancer.__init__(None) dunder.assert_called_with('salt.modules.libcloud_loadbalancer') def _validate_balancer(self, balancer): self.assertEqual(balancer['name'], 'test_balancer') def _validate_member(self, member): self.assertEqual(member['id'], 'member_id') self.assertEqual(member['ip'], '12.3.4.5') def test_list_balancers(self): balancers = libcloud_loadbalancer.list_balancers('test') self.assertEqual(len(balancers), 1) self._validate_balancer(balancers[0]) def test_list_protocols(self): protocols = libcloud_loadbalancer.list_protocols('test') self.assertEqual(len(protocols), 2) self.assertTrue('http' in protocols) def test_create_balancer(self): balancer = libcloud_loadbalancer.create_balancer('new_test_balancer', 80, 'http', 'test') self._validate_balancer(balancer) def test_create_balancer_custom_algorithm(self): balancer = libcloud_loadbalancer.create_balancer('new_test_balancer', 80, 'http', 'test', algorithm='LEAST_CONNECTIONS') self._validate_balancer(balancer) def test_destroy_balancer(self): result = libcloud_loadbalancer.destroy_balancer('test_id', 'test') self.assertTrue(result) def test_get_balancer_by_name(self): balancer = libcloud_loadbalancer.get_balancer_by_name('test_balancer', 'test') self._validate_balancer(balancer) def test_get_balancer(self): balancer = libcloud_loadbalancer.get_balancer('test_id', 'test') self._validate_balancer(balancer) def test_balancer_attach_member(self): member = libcloud_loadbalancer.balancer_attach_member('test_id', '12.3.4.5', 443, 'test') self._validate_member(member) def test_balancer_detach_member(self): result = libcloud_loadbalancer.balancer_detach_member('test_id', 'member_id', 'test') self.assertTrue(result) def test_list_balancer_members(self): members = libcloud_loadbalancer.list_balancer_members('test_id', 'test') self._validate_member(members[0])
Python
0.000002
@@ -640,42 +640,16 @@ alse -%0A MockLBDriver = object %0A%0Aif HAS LIBC @@ -644,16 +644,17 @@ %0A%0Aif HAS +_ LIBCLOUD @@ -714,24 +714,49 @@ nit__(self): + # pylint: disable=W0231 %0A @@ -2623,16 +2623,50 @@ MBER%5D%0A%0A%0A +else:%0A MockLBDriver = object%0A%0A%0A def get_
c22a0c795e5856279090ff12db435b45c1515100
increase count time on ip retrieval within ansible utils
kvirt/ansibleutils/__init__.py
kvirt/ansibleutils/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ interact with a local/remote libvirt daemon """ import os import time from kvirt.common import pprint from yaml import dump def play(k, name, playbook, variables=[], verbose=False, user=None, tunnel=False, tunnelhost=None, tunnelport=None, tunneluser=None, yamlinventory=False): """ :param k: :param name: :param playbook: :param variables: :param verbose: :param tunnelhost: :param tunnelport: :param tunneluser: """ if user is None: info = k.info(name, debug=False) user = info.get('user', 'root') ip = None counter = 0 while counter != 80: ip = k.ip(name) if ip is not None: break else: pprint("Retrieving ip of %s..." % name, color='blue') time.sleep(5) counter += 10 if ip is None: pprint("No ip found for %s. Not running playbook" % name, color='red') return if yamlinventory: info = {'ansible_user': user} inventoryfile = "/tmp/%s.inv.yaml" % name if '.' in ip: info['ansible_host'] = ip else: info['ansible_host'] = '127.0.0.1' info['ansible_port'] = ip inventory = {'ungrouped': {'hosts': {name: info}}} else: inventoryfile = "/tmp/%s.inv" % name if '.' in ip: inventory = "%s ansible_host=%s ansible_user=%s" % (name, ip, user) else: inventory = "%s ansible_host=127.0.0.1 ansible_user=%s ansible_port=%s" % (name, user, ip) ansiblecommand = "ansible-playbook" if verbose: ansiblecommand = "%s -vvv" % ansiblecommand if variables is not None: for variable in variables: if not isinstance(variable, dict) or len(list(variable)) != 1: continue else: key, value = list(variable)[0], variable[list(variable)[0]] if yamlinventory: inventory['ungrouped']['hosts'][name][key] = value else: inventory += " %s=%s" % (key, value) if tunnel and tunnelport and tunneluser: tunnelinfo = "-o ProxyCommand=\"ssh -p %s -W %%h:%%p %s@%s\"" % (tunnelport, tunneluser, tunnelhost) if yamlinventory: inventory['ungrouped']['hosts'][name]['ansible_ssh_common_args'] = tunnelinfo else: inventory += " ansible_ssh_common_args='%s'" % tunnelinfo with open(inventoryfile, 'w') as f: if yamlinventory: dump(inventory, f, default_flow_style=False) else: f.write("%s\n" % inventory) pprint("Ansible Command run:") pprint("%s -T 20 -i %s %s" % (ansiblecommand, inventoryfile, playbook)) os.system("%s -T 20 -i %s %s" % (ansiblecommand, inventoryfile, playbook)) def vm_inventory(k, name, user=None, yamlinventory=False): """ :param self: :param name: :return: """ if user is None: info = k.info(name, debug=False) user = info.get('user', 'root') counter = 0 while counter != 80: ip = k.ip(name) if ip is None: time.sleep(5) pprint("Retrieving ip of %s..." % name) counter += 10 else: break info = {'ansible_user': user} if yamlinventory else '' if ip is not None: if '.' in ip: if yamlinventory: info['ansible_host'] = ip else: info = "%s ansible_host=%s ansible_user=%s" % (name, ip, user) else: if yamlinventory: info['ansible_host'] = '127.0.0.1' info['ansible_port'] = ip else: info = "%s ansible_host=127.0.0.1 ansible_user=%s ansible_port=%s" % (name, user, ip) return info else: return None def make_plan_inventory(vms_to_host, plan, vms, groups={}, user=None, yamlinventory=False): """ :param vms_per_host: :param plan: :param vms: :param groups: :param user: :param yamlinventory: """ inventory = {} inventoryfile = "/tmp/%s.inv.yaml" % plan if yamlinventory else "/tmp/%s.inv" % plan pprint("Generating inventory %s" % inventoryfile, color='blue') allvms = vms inventory[plan] = {} if groups: inventory[plan] = {'children': {}} for group in groups: inventory[plan]['children'][group] = {} for group in groups: nodes = groups[group] inventory[plan]['children'][group]['hosts'] = {} for name in nodes: allvms.remove(name) k = vms_to_host[name].k inv = vm_inventory(k, name, user=user, yamlinventory=yamlinventory) if inv is not None: inventory[plan]['children'][group]['hosts'][name] = inv if vms_to_host[name].tunnel: tunnelinfo = "-o ProxyCommand=\"ssh -p %s -W %%h:%%p %s@%s\"" % ( vms_to_host[name].port, vms_to_host[name].user, vms_to_host[name].host) if yamlinventory: inventory[plan]['children'][group]['hosts'][name]['ansible_ssh_common_args'] = tunnelinfo else: inventory[plan]['hosts'][name] += " ansible_ssh_common_args='%s'" % tunnelinfo inventory[plan]['hosts'] = {} for name in allvms: k = vms_to_host[name].k inv = vm_inventory(k, name, user=user, yamlinventory=yamlinventory) if inv is not None: inventory[plan]['hosts'][name] = inv if vms_to_host[name].tunnel: tunnelinfo = "-o ProxyCommand=\"ssh -p %s -W %%h:%%p %s@%s\"" % ( vms_to_host[name].port, vms_to_host[name].user, vms_to_host[name].host) if yamlinventory: inventory[plan]['hosts'][name]['ansible_ssh_common_args'] = tunnelinfo else: inventory[plan]['hosts'][name] += " ansible_ssh_common_args='%s'" % tunnelinfo with open(inventoryfile, "w") as f: if yamlinventory: dump({'all': {'children': inventory}}, f, default_flow_style=False) else: inventorystr = '' if groups: for group in inventory[plan]['children']: inventorystr += "[%s]\n" % group for name in inventory[plan]['children'][group]['hosts']: inventorystr += "%s\n" % inventory[plan]['children'][group]['hosts'][name] else: inventorystr += "[%s]\n" % plan for name in inventory[plan]['hosts']: inventorystr += "%s\n" % inventory[plan]['hosts'][name] f.write("%s\n" % inventorystr)
Python
0
@@ -659,33 +659,34 @@ hile counter != -8 +12 0:%0A ip = @@ -3124,17 +3124,18 @@ nter != -8 +12 0:%0A
15c4d677af6e2aabf757bc5d0b8df88413581c67
Fix filename of cuDNN 8.1 for CUDA 10.2 on Windows
cupyx/tools/install_library.py
cupyx/tools/install_library.py
#!/usr/bin/env python """ CUDA Library Installer Installs the latest CUDA library supported by CuPy. """ # This script will also be used as a standalone script when building wheels. # Keep the script runnable without CuPy dependency. import argparse import json import os import platform import shutil import sys import tempfile import urllib.request _cudnn_records = [] def _make_cudnn_url(public_version, filename): # https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.2/cudnn-11.0-linux-x64-v8.0.2.39.tgz return ( 'https://developer.download.nvidia.com/compute/redist/cudnn' + '/v{}/{}'.format(public_version, filename)) def _make_cudnn_record( cuda_version, public_version, filename_linux, filename_windows): major_version = public_version.split('.')[0] return { 'cuda': cuda_version, 'cudnn': public_version, 'assets': { 'Linux': { 'url': _make_cudnn_url(public_version, filename_linux), 'filename': 'libcudnn.so.{}'.format(public_version), }, 'Windows': { 'url': _make_cudnn_url(public_version, filename_windows), 'filename': 'cudnn64_{}.dll'.format(major_version), }, } } # Latest cuDNN versions: https://developer.nvidia.com/rdp/cudnn-download _cudnn_records.append(_make_cudnn_record( '11.1', '8.1.0', 'cudnn-11.2-linux-x64-v8.1.0.77.tgz', 'cudnn-11.2-windows-x64-v8.1.0.77.zip')) _cudnn_records.append(_make_cudnn_record( '11.0', '8.1.0', 'cudnn-11.2-linux-x64-v8.1.0.77.tgz', 'cudnn-11.2-windows-x64-v8.1.0.77.zip')) _cudnn_records.append(_make_cudnn_record( '10.2', '8.1.0', 'cudnn-10.2-linux-x64-v8.1.0.77.tgz', 'cudnn-10.2-windows-x64-v8.1.0.77.zip')) _cudnn_records.append(_make_cudnn_record( '10.1', '8.0.5', 'cudnn-10.1-linux-x64-v8.0.5.39.tgz', 'cudnn-10.1-windows10-x64-v8.0.5.39.zip')) _cudnn_records.append(_make_cudnn_record( '10.0', '7.6.5', 'cudnn-10.0-linux-x64-v7.6.5.32.tgz', 'cudnn-10.0-windows10-x64-v7.6.5.32.zip')) _cudnn_records.append(_make_cudnn_record( '9.2', '7.6.5', 'cudnn-9.2-linux-x64-v7.6.5.32.tgz', 'cudnn-9.2-windows10-x64-v7.6.5.32.zip')) def install_cudnn(cuda, prefix): record = None for record in _cudnn_records: if record['cuda'] == cuda: break else: raise RuntimeError(''' The CUDA version specified is not supported. Should be one of {}.'''.format(str([x['cuda'] for x in _cudnn_records]))) if prefix is None: prefix = os.path.expanduser('~/.cupy/cuda_lib') destination = calculate_destination(prefix, cuda, 'cudnn', record['cudnn']) if os.path.exists(destination): raise RuntimeError(''' The destination directory {} already exists. Remove the directory first if you want to reinstall.'''.format(destination)) asset = record['assets'][platform.system()] print('Installing cuDNN {} for CUDA {} to: {}'.format( record['cudnn'], record['cuda'], destination)) url = asset['url'] print('Downloading {}...'.format(url)) with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, os.path.basename(url)), 'wb') as f: with urllib.request.urlopen(url) as response: f.write(response.read()) print('Extracting...') shutil.unpack_archive(f.name, tmpdir) print('Installing...') shutil.move(os.path.join(tmpdir, 'cuda'), destination) print('Cleaning up...') print('Done!') def calculate_destination(prefix, cuda, lib, lib_ver): """Calculates the installation directory. ~/.cupy/cuda_lib/{cuda_version}/{library_name}/{library_version} """ return os.path.join(prefix, cuda, lib, lib_ver) def main(args): parser = argparse.ArgumentParser() # TODO(kmaehashi) support cuTENSOR and NCCL parser.add_argument('--library', choices=['cudnn'], required=True, help='Library to install') parser.add_argument('--cuda', type=str, required=True, help='CUDA version') parser.add_argument('--prefix', type=str, default=None, help='Install destination') parser.add_argument('--action', choices=['install', 'dump'], default='install', help='Action to perform') params = parser.parse_args(args) if params.prefix is not None: params.prefix = os.path.abspath(params.prefix) if params.library == 'cudnn': if params.action == 'install': install_cudnn(params.cuda, params.prefix) elif params.action == 'dump': print(json.dumps(_cudnn_records, indent=4)) else: assert False else: assert False if __name__ == '__main__': main(sys.argv[1:])
Python
0.000784
@@ -1778,32 +1778,34 @@ dnn-10.2-windows +10 -x64-v8.1.0.77.z
8a5d931cb66dc452e9db6f52ac7fcc371a855608
Update curate_teleco_performance_data.py
lab-01/cell-tower-anomaly-detection/00-scripts/curate_teleco_performance_data.py
lab-01/cell-tower-anomaly-detection/00-scripts/curate_teleco_performance_data.py
# ====================================================================================== # ABOUT # In this PySpark script, we augment the Telecom data with curated customer data (prior # job), curate it and persist to GCS # ====================================================================================== import configparser from datetime import datetime import os import json from pyspark.sql import SparkSession from pyspark.sql.functions import udf, col, substring, lit, when, avg from pyspark.sql import functions as F from pyspark.sql.functions import input_file_name import random from pyspark.sql.types import * from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format from pyspark import SparkContext, SparkConf, SQLContext from google.cloud import storage import sys # Parse arguments sourceBucketNm=sys.argv[1] # Source data definition customerCuratedDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data/customer_augmented/part*" telcoCustomerChurnDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/01-datasets/telecom_customer_churn_data.csv" # Output directory declaration outputGCSURI="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data" # Get or create a Spark session spark =SparkSession.builder.appName("cell_tower_performance_dataset-exploration").getOrCreate() # Read the curated customer data (blended with services threshold data) from GCS curatedCustomerDataDF = spark.read.format("parquet").option("header", True).option("inferschema",True).load(customerCuratedDataDir) # Read the telecom customer churn data from GCS telecomCustomerChurnRawDataDF = spark.read.format("csv").option("header", True).option("inferschema",True).load(telcoCustomerChurnDataDir) telecomCustomerChurnRawDataDF.printSchema() # Subset the telecom customer churn/performance data for relevant attributes # ... Create subset telecomCustomerChurnSubsetDF = telecomCustomerChurnRawDataDF.selectExpr("roam_Mean","change_mou","drop_vce_Mean","drop_dat_Mean","blck_vce_Mean","blck_dat_Mean","plcd_vce_Mean","plcd_dat_Mean","comp_vce_Mean","comp_dat_Mean","peak_vce_Mean","peak_dat_Mean","mou_peav_Mean","mou_pead_Mean","opk_vce_Mean","opk_dat_Mean","mou_opkv_Mean","mou_opkd_Mean","drop_blk_Mean","callfwdv_Mean","callwait_Mean","churn","months","uniqsubs","actvsubs","area","dualband","forgntvl","Customer_ID") # ... Create a column called customer_ID_Short that is a substring of the original Customer_ID telecomCustomerChurnFinalDF=telecomCustomerChurnSubsetDF.withColumn('customer_ID_Short', substring('Customer_ID', 4,7)) # ... Rename the Customer_ID column, customer_ID_original telecomCustomerChurnFinalDF.withColumnRenamed('Customer_ID', 'customer_ID_original') # ... Rename the newly added customer_ID_Short column, customer_ID telecomCustomerChurnFinalDF.withColumnRenamed('customer_ID_Short', 'customer_ID') # ... Quick visual telecomCustomerChurnFinalDF.show(10,truncate=False) # Join the curated customer data with the telecom network performance data based on customer ID consolidatedDataDF = curatedCustomerDataDF.join(telecomCustomerChurnFinalDF, curatedCustomerDataDF.customerID == telecomCustomerChurnFinalDF.customer_ID, "inner").drop(telecomCustomerChurnFinalDF.customer_ID).drop(telecomCustomerChurnFinalDF.churn) consolidatedDataDF.show(truncate=False) # Persist the augmented telecom tower performance data to GCS consolidatedDataDF.write.parquet(os.path.join(outputGCSURI, "telco_performance_augmented"), mode = "overwrite")
Python
0
@@ -2672,16 +2672,44 @@ riginal%0A +telecomCustomerChurnFinalDF= telecomC @@ -2844,32 +2844,60 @@ mn, customer_ID%0A +telecomCustomerChurnFinalDF= telecomCustomerC
ea542282911cbc7b3cf594a20175fbddcbd75a89
Use absolute import
restapi_logging_handler/__init__.py
restapi_logging_handler/__init__.py
from loggly_handler import LogglyHandler from restapi_logging_handler import RestApiHandler
Python
0.000045
@@ -1,13 +1,77 @@ from +__future__ import absolute_import%0A%0Afrom restapi_logging_handler. loggly_h @@ -118,32 +118,56 @@ _logging_handler +.restapi_logging_handler import RestApiH
128c54529da80d5f84a0bf8a9bca6e83ed14a342
Delete unused import
simplesqlite/loader/html/formatter.py
simplesqlite/loader/html/formatter.py
# encoding: utf-8 """ .. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com> """ from __future__ import absolute_import import bs4 import dataproperty from ..constant import TableNameTemplate as tnt from ..data import TableData from ..error import InvalidDataError from ..formatter import TableFormatter class HtmlTableFormatter(TableFormatter): def __init__(self, source_data): super(HtmlTableFormatter, self).__init__(source_data) try: self.__soup = bs4.BeautifulSoup(self._source_data, "lxml") except bs4.FeatureNotFound: self.__soup = bs4.BeautifulSoup(self._source_data, "html.parser") def to_table_data(self): self._validate_source_data() for table in self.__soup.find_all("table"): tabledata = self.__parse_html(table) yield tabledata def _make_table_name(self): table_name = self._loader.make_table_name() key = self.__table_id if dataproperty.is_empty_string(key): key = "{:s}{:d}".format( self._loader.format_name, self._loader.get_format_table_count()) return table_name.replace(tnt.KEY, key) def __parse_html(self, table): header_list = [] data_matrix = [] self.__table_id = table.get("id") row_list = table.find_all("tr") for row in row_list: col_list = row.find_all("td") if dataproperty.is_empty_sequence(col_list): th_list = row.find_all("th") if dataproperty.is_empty_sequence(th_list): continue header_list = [row.text.strip() for row in th_list] continue data_list = [value.text.strip() for value in col_list] data_matrix.append(data_list) self._loader.inc_table_count() return TableData( self._make_table_name(), header_list, data_matrix)
Python
0.000001
@@ -230,45 +230,8 @@ ata%0A -from ..error import InvalidDataError%0A from
980f39eb6f17eb2469573dba71c4d7ac3d72220d
Order users by date joined
resrc/userprofile/views.py
resrc/userprofile/views.py
# -*- coding: utf-8 -*-: from django.shortcuts import redirect, get_object_or_404 from django.http import Http404 from django.contrib.auth.models import User, SiteProfileNotAvailable from django.contrib.auth.decorators import login_required from django.contrib.auth import authenticate, login, logout from django.contrib import messages from django.core.urlresolvers import reverse from django.core.context_processors import csrf from resrc.utils.tokens import generate_token from resrc.utils import render_template from resrc.userprofile.models import Profile from resrc.userprofile.forms import LoginForm, ProfileForm, RegisterForm, ChangePasswordForm @login_required def user_list(request): #profiles = Profile.objects.select_related('User').order_by('user__date_joined') users = User.objects.exclude(username='root') return render_template('user/list.html', { 'users': list(users) }) def details(request, user_name): '''Displays details about a profile''' usr = get_object_or_404(User, username=user_name) try: profile = Profile.objects.get(user=usr) except SiteProfileNotAvailable: raise Http404 return render_template('user/profile.html', { 'usr': usr, 'profile': profile }) def login_register_view(request, register=False, modal=False): csrf_tk = {} csrf_tk.update(csrf(request)) login_error = False if request.method == 'POST' and not register: login_form = LoginForm(request.POST) if login_form.is_valid(): username = request.POST['username'] password = request.POST['password'] user = authenticate(username=username, password=password) if user is not None: login(request, user) request.session['get_token'] = generate_token() if not 'remember' in request.POST: request.session.set_expiry(0) if 'next' in request.POST: return redirect(request.POST['next'] or '/') else: return redirect('/') else: login_error = 'Bad user/password' else: login_error = 'Form invalid' login_form = LoginForm() register_form = RegisterForm() if request.method == 'POST' and register: register_form = RegisterForm(request.POST) if register_form.is_valid(): data = register_form.data user = User.objects.create_user( data['username'], data['email'], data['password']) profile = Profile(user=user) profile.save() user.backend = 'django.contrib.auth.backends.ModelBackend' login(request, user) return redirect('/user/register/success') csrf_tk['register_form'] = register_form csrf_tk['login_error'] = login_error csrf_tk['login_form'] = login_form csrf_tk['register'] = register if 'next' in request.GET: csrf_tk['next'] = request.GET.get('next') if not modal: return render_template('user/login_register.html', csrf_tk) else: return render_template('user/login_register_modal.html', csrf_tk) @login_required def register_success(request): return render_template('user/register_success.html') @login_required def logout_view(request): logout(request) request.session.clear() return redirect('/') @login_required def settings_profile(request): profile = Profile.objects.get(user=request.user) if request.method == 'POST': form = ProfileForm(request.POST) if form.is_valid(): profile.about = form.data['about'] request.user.email = form.data['email'] languages = request.POST.getlist('languages') if 'show_email' in form.data: profile.show_email = form.data['show_email'] else: profile.show_email = False # Save the profile # and redirect the user to the configuration space # with message indicate the state of the operation try: profile.languages.clear() from resrc.language.models import Language for lang in languages: profile.languages.add(Language.objects.get(pk=lang)) profile.save() request.user.save() except: messages.error(request, 'Error') return redirect(reverse('user-settings')) messages.success( request, 'Update successful.') return redirect(reverse('user-settings')) else: return render_template('user/settings_profile.html', { 'usr': request.user, 'form': form, }) else: form = ProfileForm(initial={ 'about': profile.about, 'email': request.user.email, 'languages': profile.languages.all(), 'show_email': profile.show_email } ) return render_template('user/settings_profile.html', { 'usr': request.user, 'form': form }) @login_required def settings_account(request): if request.method == 'POST': form = ChangePasswordForm(request.POST, request.user) if form.is_valid(): try: request.user.set_password(form.data['password_new']) request.user.save() messages.success( request, 'Password updated.') return redirect(reverse('user-settings')) except: messages.error(request, 'Error while updating your password.') return redirect(reverse('user-settings')) else: return render_template('user/settings_account.html', { 'usr': request.user, 'form': form, }) else: form = ChangePasswordForm(request.user) return render_template('user/settings_account.html', { 'usr': request.user, 'form': form, })
Python
0
@@ -827,16 +827,40 @@ ='root') +.order_by('date_joined') %0A ret
de69aa9c04ea34bd31b5cf17e8e3cfdf17b0b9df
Naive user/password login w.o. encryption
1wire/publish.py
1wire/publish.py
#!/usr/bin/env python import os import argparse import time import threading from Queue import Queue import mosquitto queue = Queue(100) def main(host, port, sensors): print "#######################" print "Temperature poller v0.2" print "#######################" print "Using sensors:" pollers = [] for sensor in sensors: print " {sensor}".format(sensor=sensor) p = PollerThread(sensor) p.start() pollers.append(p) publisher = PublisherThread(host, port) publisher.start() try: raw_input("Press key to exit") except (KeyboardInterrupt, SystemExit): pass finally: for poller in pollers: poller.stop() publisher.stop() # Make sure publisher is not stuck waiting for data queue.put(None) class StoppableThread(threading.Thread): def __init__(self): super(StoppableThread, self).__init__() self._stop = threading.Event() def stop(self): self._stop.set() def is_stopped(self): return self._stop.isSet() class PollerThread(StoppableThread): def __init__(self, sensor): super(PollerThread, self).__init__() self.sensor = sensor self.id = os.path.dirname(sensor) def run(self): global queue while not self.is_stopped(): temp = self.get_temp() queue.put((self.id, temp)) time.sleep(1) def get_temp(self): temp = -1 with open(self.sensor, 'rb') as s: temp = s.read() return temp class PublisherThread(StoppableThread): def __init__(self, host, port): super(PublisherThread, self).__init__() self.mqttc = mosquitto.Mosquitto("python_pub") self.mqttc.will_set("/event/dropped", "Sorry, I seem to have died.") self.mqttc.connect(host, port, 60, True) def run(self): global queue while not self.is_stopped(): ret = queue.get() if ret: (id, temp) = ret queue.task_done() self.mqttc.publish("iot_lab/temp", "{id}:{temp}".format(id=id, temp=temp)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-H', '--host', required=True) parser.add_argument('-P', '--port', default="1883") parser.add_argument('-s', '--sensors', default=[], action='append', dest="sensors", help='path(s) to sensors, separated by space') args = parser.parse_args() main(args.host, args.port, args.sensors)
Python
0.99999
@@ -153,16 +153,32 @@ t, port, + user, password, sensors @@ -180,16 +180,16 @@ nsors):%0A - prin @@ -531,16 +531,46 @@ st, port +, user=user, password=password )%0A pu @@ -1711,16 +1711,42 @@ st, port +, user=None, password=None ):%0A @@ -1920,16 +1920,125 @@ died.%22)%0A + if user is not None and password is not None:%0A self.mqttc.username_pw_set(user, password)%0A @@ -2545,16 +2545,128 @@ %221883%22)%0A + parser.add_argument('-u', '--user', default=None)%0A parser.add_argument('-p', '--password', default=None)%0A pars @@ -2823,16 +2823,16 @@ _args()%0A - main @@ -2853,16 +2853,42 @@ gs.port, + args.user, args.password, args.se
77bd24b0efa3c53b4c4cfdf5b6db00db8cf69063
Replace HOST_CFG with "host" as the former has been marked as deprecated.
tools/build_defs/apple/apple_genrule.bzl
tools/build_defs/apple/apple_genrule.bzl
# Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load(":shared.bzl", "apple_action", "DARWIN_EXECUTION_REQUIREMENTS", "APPLE_FRAGMENTS") def _compute_make_variables(resolved_srcs, files_to_build): variables = {"SRCS": cmd_helper.join_paths(" ", resolved_srcs), "OUTS": cmd_helper.join_paths(" ", files_to_build)} if len(resolved_srcs) == 1: variables["<"] = list(resolved_srcs)[0].path if len(files_to_build) == 1: variables["@"] = list(files_to_build)[0].path return variables def _apple_genrule(ctx): resolved_srcs = set() if not ctx.outputs.outs: fail("apple_genrule must have one or more outputs", attr="outs") files_to_build = set(ctx.outputs.outs) if ctx.attr.executable and len(files_to_build) > 1: fail("if genrules produce executables, they are allowed only one output. " + "If you need the executable=1 argument, then you should split this " + "genrule into genrules producing single outputs", attr="executable") label_dict = {} for dep in ctx.attr.srcs: resolved_srcs += dep.files label_dict[dep.label] = dep.files resolved_inputs, argv, runfiles_manifests = ctx.resolve_command( command=ctx.attr.cmd, attribute="cmd", expand_locations=True, make_variables=_compute_make_variables(set(resolved_srcs), files_to_build), tools=ctx.attr.tools, label_dict=label_dict, execution_requirements=DARWIN_EXECUTION_REQUIREMENTS) message = ctx.attr.message or "Executing apple_genrule" env = ctx.configuration.default_shell_env env += ctx.fragments.apple.apple_host_system_env() apple_action(ctx, inputs=list(resolved_srcs) + resolved_inputs, outputs=list(files_to_build), env=env, command=argv, progress_message="%s %s" % (message, ctx), mnemonic="Genrule", input_manifests=runfiles_manifests) return struct(files=files_to_build, data_runfiles=ctx.runfiles(transitive_files=files_to_build)) _apple_genrule_inner = rule( implementation=_apple_genrule, attrs={ "srcs": attr.label_list(allow_files=True), "tools": attr.label_list(cfg=HOST_CFG, allow_files=True), "outs": attr.output_list(mandatory=True), "cmd": attr.string(mandatory=True), "message": attr.string(), "output_licenses": attr.license(), "executable": attr.bool(default=False), }, output_to_genfiles = True, fragments=APPLE_FRAGMENTS) """Genrule which provides Apple specific environment and make variables. This mirrors the native genrule except that it provides a different set of make variables. This rule will only run on a Mac. Example of use: load("//tools/build_defs/apple/apple_genrule.bzl", "apple_genrule") apple_genrule( name = "world", outs = ["hi"], cmd = "touch $(@)", ) This rule also does location expansion, much like the native genrule. For example, $(location hi) may be used to refer to the output in the above example. The set of make variables that are supported for this rule: OUTS: The outs list. If you have only one output file, you can also use $@. SRCS: The srcs list (or more precisely, the pathnames of the files corresponding to labels in the srcs list). If you have only one source file, you can also use $<. <: srcs, if it's a single file. @: outs, if it's a single file. The following environment variables are added to the rule action: DEVELOPER_DIR: The base developer directory as defined on Apple architectures, most commonly used in invoking Apple tools such as xcrun. """ def apple_genrule( name, cmd, executable = False, outs = [], **kwargs): if executable: if len(outs) != 1: fail("apple_genrule, if executable, must have exactly one output") intermediate_out = outs[0] + "_nonexecutable" _apple_genrule_inner( name = name + "_nonexecutable", outs = [intermediate_out], cmd = cmd, **kwargs) native.genrule( name = name, outs = outs, srcs = [intermediate_out], cmd = "cp $< $@", executable = True, ) else: _apple_genrule_inner( name = name, outs = outs, cmd = cmd, **kwargs)
Python
0.000332
@@ -2795,16 +2795,14 @@ cfg= -HOST_CFG +%22host%22 , al
7f02d1f2b23bbf27e99d87ef23c491823875c3d1
fix bin none subprocess.TimeoutExpired
bin/virt.py
bin/virt.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import yaml import subprocess import os import sys def proc(cmd,time = 120,sh = True ): print("$".format(cmd)) p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh) try: outs, errs = p.communicate(timeout=time) except subprocess.TimeoutExpired: p.kill() outs, errs = p.communicate() return outs,errs,p ROOT_PATH=os.path.dirname(__file__) print(proc("sudo apt-get update")[0]) print(proc("sudo apt-get install -qq sshpass")[0]) print(proc("ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"")[0]) print(proc("docker info")[0]) print(proc("docker version")[0]) with open('meta/main.yml', 'r') as f: doc = yaml.load(f) for i in doc["galaxy_info"]["platforms"]: distrib = i["name"] for x in i["versions"]: dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x) if os.path.exists(dockerfile): print(proc("docker build -f {} -t {}_{} .".format(dockerfile,distrib,x))[0]) print(proc("docker run -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro {}_{}".format(distrib,x))[0]) else: print("Critical error. Not found docker files {}".format(dockerfile)) sys.exit(1) proc("sleep 10") proc("docker inspect --format '{{.Config.Image}} ansible_ssh_host={{.NetworkSettings.IPAddress}}' `docker ps -q` >> /etc/ansible/hosts") for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' \`docker ps -q\`")[0]: proc("ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item)) proc("sshpass -p '000000' ssh-copy-id root@{}".format(item))
Python
0.000001
@@ -245,21 +245,8 @@ sh)%0A - try:%0A @@ -290,100 +290,8 @@ me)%0A - except subprocess.TimeoutExpired:%0A p.kill()%0A outs, errs = p.communicate()%0A
939cc5adba6f5a95aac317134eb841838a0bff3f
Tweak inheritance
rest_framework/viewsets.py
rest_framework/viewsets.py
""" ViewSets are essentially just a type of class based view, that doesn't provide any method handlers, such as `get()`, `post()`, etc... but instead has actions, such as `list()`, `retrieve()`, `create()`, etc... Actions are only bound to methods at the point of instantiating the views. user_list = UserViewSet.as_view({'get': 'list'}) user_detail = UserViewSet.as_view({'get': 'retrieve'}) Typically, rather than instantiate views from viewsets directly, you'll regsiter the viewset with a router and let the URL conf be determined automatically. router = DefaultRouter() router.register(r'users', UserViewSet, 'user') urlpatterns = router.urls """ from __future__ import unicode_literals from functools import update_wrapper from django.utils.decorators import classonlymethod from rest_framework import views, generics, mixins class ViewSetMixin(object): """ This is the magic. Overrides `.as_view()` so that it takes an `actions` keyword that performs the binding of HTTP methods to actions on the Resource. For example, to create a concrete view binding the 'GET' and 'POST' methods to the 'list' and 'create' actions... view = MyViewSet.as_view({'get': 'list', 'post': 'create'}) """ @classonlymethod def as_view(cls, actions=None, **initkwargs): """ Because of the way class based views create a closure around the instantiated view, we need to totally reimplement `.as_view`, and slightly modify the view function that is created and returned. """ # The suffix initkwarg is reserved for identifing the viewset type # eg. 'List' or 'Instance'. cls.suffix = None # sanitize keyword arguments for key in initkwargs: if key in cls.http_method_names: raise TypeError("You tried to pass in the %s method name as a " "keyword argument to %s(). Don't do that." % (key, cls.__name__)) if not hasattr(cls, key): raise TypeError("%s() received an invalid keyword %r" % ( cls.__name__, key)) def view(request, *args, **kwargs): self = cls(**initkwargs) # We also store the mapping of request methods to actions, # so that we can later set the action attribute. # eg. `self.action = 'list'` on an incoming GET request. self.action_map = actions # Bind methods to actions # This is the bit that's different to a standard view for method, action in actions.items(): handler = getattr(self, action) setattr(self, method, handler) # Patch this in as it's otherwise only present from 1.5 onwards if hasattr(self, 'get') and not hasattr(self, 'head'): self.head = self.get # And continue as usual return self.dispatch(request, *args, **kwargs) # take name and docstring from class update_wrapper(view, cls, updated=()) # and possible attributes set by decorators # like csrf_exempt from dispatch update_wrapper(view, cls.dispatch, assigned=()) # We need to set these on the view function, so that breadcrumb # generation can pick out these bits of information from a # resolved URL. view.cls = cls view.suffix = initkwargs.get('suffix', None) return view def initialize_request(self, request, *args, **kargs): """ Set the `.action` attribute on the view, depending on the request method. """ request = super(ViewSetMixin, self).initialize_request(request, *args, **kargs) self.action = self.action_map.get(request.method.lower()) return request class ViewSet(ViewSetMixin, views.APIView): """ The base ViewSet class does not provide any actions by default. """ pass class GenericViewSet(ViewSetMixin, generics.GenericAPIView): """ The GenericViewSet class does not provide any actions by default, but does include the base set of generic view behavior, such as the `get_object` and `get_queryset` methods. """ pass class ReadOnlyModelViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, ViewSetMixin, generics.GenericAPIView): """ A viewset that provides default `list()` and `retrieve()` actions. """ pass class ModelViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, mixins.ListModelMixin, ViewSetMixin, generics.GenericAPIView): """ A viewset that provides default `create()`, `retrieve()`, `update()`, `partial_update()`, `destroy()` and `list()` actions. """ pass
Python
0.000001
@@ -4428,72 +4428,22 @@ -ViewSetMixin,%0A generics. Generic -API View +Set ):%0A @@ -4788,65 +4788,22 @@ -ViewSetMixin,%0A generics. Generic -API View +Set ):%0A
45e9ddce96b4fdadca63a50bf2808c7f98520d99
print query on error
data_upload/util/bq_wrapper.py
data_upload/util/bq_wrapper.py
''' Created on Jan 22, 2017 Copyright 2017, Institute for Systems Biology. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: michael ''' from google.cloud import bigquery def query_bq_table(query, use_legacy, project, log): log.info('\t\tquerying bq: %s' % query) client = bigquery.Client(project=project) query_results = client.run_sync_query(query) # Use standard SQL syntax for queries. # See: https://cloud.google.com/bigquery/sql-reference/ query_results.use_legacy_sql = use_legacy query_results.run() log.info('\t\tdone querying bq: %s' % query) return query_results def fetch_paged_results(query_results, fetch_count, project_name, page_token, log): log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else '')) rows, total_rows, page_token = query_results.fetch_data( max_results=fetch_count, page_token=page_token) log.info('\t\tfetched %d rows %s' % (len(rows), (' for ' + project_name) if project_name else '')) return total_rows, rows, page_token
Python
0.000446
@@ -1008,16 +1008,30 @@ legacy%0D%0A + try:%0D%0A quer @@ -1047,16 +1047,108 @@ .run()%0D%0A + except:%0D%0A log.exception('problem with query:%5Cn%7B%7D'.format(query))%0D%0A raise%0D%0A log.
8928d1987dd05def51b4d7abddbbd3658b300adb
Add handler for django.request logger to get reports of 5xx errors
learnwithpeople/settings.py
learnwithpeople/settings.py
""" Django settings for learnwithpeople project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ from __future__ import absolute_import # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) path = lambda *a: os.path.join(BASE_DIR, *a) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ ADMINS = ( ('Admin', os.environ.get('ADMIN_EMAIL', 'admin@localhost') ), ) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY', 'youshouldchangethis') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False TEMPLATE_DEBUG = DEBUG # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # 3rd party apps 'crispy_forms', 'localflavor', 's3direct', # own 'studygroups', 'interest', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'learnwithpeople.urls' WSGI_APPLICATION = 'learnwithpeople.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en' TIME_ZONE = 'US/Central' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = path('static_serve') STATICFILES_DIRS = ( path('static'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # other finders.. ) MEDIA_URL = '/media/' MEDIA_ROOT = path('upload') TEMPLATE_DIRS = ( path('templates'), ) CRISPY_TEMPLATE_PACK = 'bootstrap3' EMAIL_HOST = os.environ.get('EMAIL_HOST') EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER') EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD') DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', 'webmaster@localhost') SERVER_EMAIL = "sysadmin@p2up.org" ##### Heroku config import dj_database_url DATABASES['default'] = dj_database_url.config(default='sqlite:///{0}'.format(path('db.sqlite3'))) # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] ##### AWS upload config AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY') AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME') S3DIRECT_REGION = 'us-east-1' S3DIRECT_DESTINATIONS = { # Allow anybody to upload jpeg's and png's. 'imgs': ('uploads/imgs', lambda u: True, ['image/jpeg', 'image/png'],), } ##### Twilio config TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID') TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN') TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER') LOGIN_REDIRECT_URL = '/facilitator/' DOMAIN = os.environ.get('DOMAIN', 'example.net') ####### Celery config ####### BROKER_URL = os.environ.get('BROKER_URL', 'amqp://guest:guest@localhost//') from celery.schedules import crontab CELERYBEAT_SCHEDULE = { # Executes every Monday morning at 7:30 A.M 'gen_reminders': { 'task': 'studygroups.tasks.gen_reminders', 'schedule': crontab(minute='*/10'), }, 'send_reminders': { 'task': 'studygroups.tasks.send_reminders', 'schedule': crontab(minute='*/10'), }, 'weekly_update': { 'task': 'studygroups.tasks.weekly_update', 'schedule': crontab(hour=10, minute=0, day_of_week='monday'), }, 'daily_backup': { 'task': 'backup.tasks.make_backup', 'schedule': crontab(hour=1, minute=0), }, } LOGGING = { 'version': 1, 'dissable_existing_loggers': False, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', }, 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler', 'include_html': True, }, }, 'loggers': { '': { 'handlers': ['mail_admins', 'console'], 'level': 'DEBUG', } }, } #### Backup config #### BACKUP_DIR = os.environ.get('BACKUP_DIR') # Directory where backups will be stored locally BACKUP_AWS_ACCESS_KEY_ID = os.environ.get('BACKUP_AWS_ACCESS_KEY_ID') # AWS key with access to backup bucket BACKUP_AWS_SECRET_ACCESS_KEY = os.environ.get('BACKUP_AWS_SECRET_ACCESS_KEY') # AWS secret for above key BACKUP_AWS_STORAGE_BUCKET_NAME = os.environ.get('BACKUP_AWS_STORAGE_BUCKET_NAME') # Name of the bucket where backups should be stored BACKUP_AWS_KEY_PREFIX = os.environ.get('BACKUP_AWS_KEY_PREFIX') # Prefix for generated key on AWS s3 ##### Support for settings_local.py try: from .settings_local import * except ImportError: pass
Python
0
@@ -5136,32 +5136,174 @@ 'loggers': %7B%0A + 'django.request': %7B%0A 'handlers': %5B'mail_admins'%5D,%0A 'level': 'ERROR',%0A 'propagate': False,%0A %7D,%0A '': %7B%0A
1a417f83847030d47ac3eb596f61145b9768bc85
bump the schema version for samples
crits/samples/sample.py
crits/samples/sample.py
import json from mongoengine import Document from mongoengine import StringField, ListField from mongoengine import IntField from django.conf import settings from crits.samples.migrate import migrate_sample from crits.core.crits_mongoengine import CritsBaseAttributes from crits.core.crits_mongoengine import CritsSourceDocument from crits.core.crits_mongoengine import CritsActionsDocument from crits.core.crits_mongoengine import json_handler from crits.core.data_tools import format_file from crits.core.fields import getFileField class Sample(CritsBaseAttributes, CritsSourceDocument, CritsActionsDocument, Document): """Sample object""" meta = { "collection": settings.COL_SAMPLES, "crits_type": 'Sample', "latest_schema_version": 4, "shard_key": ('md5',), "schema_doc": { 'filename': 'The name of the last file that was uploaded with this'\ 'MD5', 'filenames': 'A list of filenames this binary has gone by.', 'filetype': 'The filetype of the file', 'mimetype': 'The mimetype of the file', 'size': 'The size of the file', 'md5': 'The MD5 of the file', 'sha1': 'The SHA1 of the file', 'sha256': 'The SHA256 of the file', 'ssdeep': 'The ssdeep of the file', 'impfuzzy': 'The impfuzzy of the executable file', 'campaign': 'List [] of campaigns using this file', 'source': 'List [] of sources that provided this file', 'created': 'ISODate of when this file was uploaded', 'modified': 'ISODate of when the file metadata was last modified', 'filedata': 'The ObjectId of the file in GridFS' }, "jtable_opts": { 'details_url': 'crits.samples.views.detail', 'details_url_key': 'md5', 'default_sort': "created DESC", 'searchurl': 'crits.samples.views.samples_listing', 'fields': [ "filename", "size", "filetype", "created", "modified", "campaign", "source", "md5", "id", "status"], 'jtopts_fields': [ "details", "filename", "size", "filetype", "created", "campaign", "source", "md5", "status", "favorite", "id"], 'hidden_fields': ["md5"], 'linked_fields': ["filename", "source", "campaign", "filetype"], 'details_link': 'details', 'no_sort': ['details', 'id'] }, } filedata = getFileField(collection_name=settings.COL_SAMPLES) filename = StringField(required=True) filenames = ListField(StringField()) filetype = StringField() md5 = StringField(required=True) mimetype = StringField() sha1 = StringField() sha256 = StringField() size = IntField(default=0) ssdeep = StringField() impfuzzy = StringField() def migrate(self): migrate_sample(self) def add_file_data(self, file_data): self._generate_file_metadata(file_data) self.filedata = file_data def add_file_obj(self, file_obj): data = file_obj.read() self._generate_file_metadata(data) self.filedata = data def _generate_file_metadata(self, data): import pydeep import magic from hashlib import md5, sha1, sha256 try: import pyimpfuzzy except ImportError: pass try: self.filetype = magic.from_buffer(data) except: self.filetype = "Unavailable" try: mimetype = magic.from_buffer(data, mime=True) if mimetype: self.mimetype = mimetype.split(";")[0] if not mimetype: self.mimetype = "unknown" except: self.mimetype = "Unavailable" self.size = len(data) # this is a shard key. you can't modify it once it's set. # MongoEngine will still mark the field as modified even if you set it # to the same value. if not self.md5: self.md5 = md5(data).hexdigest() self.sha1 = sha1(data).hexdigest() self.sha256 = sha256(data).hexdigest() try: self.ssdeep = pydeep.hash_bytes(data) except: self.ssdeep = None try: self.impfuzzy = pyimpfuzzy.get_impfuzzy_data(data) except: self.impfuzzy = None def is_pe(self): """ Is this a PE file. """ return self.filedata.grid_id != None and self.filedata.read(2) == "MZ" def is_pdf(self): """ Is this a PDF. """ return self.filedata.grid_id != None and "%PDF-" in self.filedata.read(1024) def discover_binary(self): """ Queries GridFS for a matching binary to this sample document. """ from crits.core.mongo_tools import mongo_connector fm = mongo_connector("%s.files" % self._meta['collection']) objectid = fm.find_one({'md5': self.md5}, {'_id': 1}) if objectid: self.filedata.grid_id = objectid['_id'] self.filedata._mark_as_changed() def set_filenames(self, filenames): """ Set the Sample filenames to a specified list. :param filenames: The filenames to set. :type filenames: list """ if isinstance(filenames, list): self.filenames = filenames def _json_yaml_convert(self, exclude=[]): """ Helper to convert to a dict before converting to JSON. :param exclude: list of fields to exclude. :type exclude: list :returns: json """ d = self.to_dict(exclude) if 'filedata' not in exclude: (d['filedata'], ext) = format_file(self.filedata.read(), 'base64') return json.dumps(d, default=json_handler)
Python
0
@@ -779,17 +779,17 @@ rsion%22: -4 +5 ,%0A
d590307b0d59ac7163016197e3de0e8bced377d2
Fix form typo
account_verification_flask/forms/forms.py
account_verification_flask/forms/forms.py
from flask_wtf import Form from wtforms import TextField, PasswordField, IntegerField from wtforms.validators import DataRequired, Length, Email, EqualTo class RegisterForm(Form): name = TextField( 'Tell us your name', validators = [DataRequired(message = "Name is required"), Length(min = 3,message = "Name must greater than 3 chars")] ) email = TextField( 'Enter your E-mail', validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")] ) password = PasswordField( 'Password', validators = [DataRequired("Password is required")] ) country_code = TextField( 'Coundtry Code', validators = [DataRequired("Country code is required"), Length(min = 1, max = 4, message = "Country must be between 1 and 4 chars")] ) phone_number = IntegerField( 'Phone Number', validators = [DataRequired("Valid phone number is required")] ) class ResendCodeForm(Form): email = TextField( 'E-mail', validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")] ) class VerifyCodeForm(ResendCodeForm): verification_code = TextField( 'Verification Code', validators = [DataRequired("Verification code is required")] )
Python
0.000191
@@ -676,17 +676,16 @@ 'Coun -d try Code
a1826e5507a083cdcb906c411e97031bb5546eae
Remove debug print
cax/qsub.py
cax/qsub.py
""" Access the cluster. Easy to use functions to make use of the cluster facilities. This checks the available slots on the requested queue, creates the scripts to submit, submits the jobs, and cleans up afterwards. Example usage:: >>> import qsub >>> qsub.submit_job('touch /data/hisparc/test', 'job_1', 'express') """ import logging import os from cax import config import subprocess import tempfile from distutils.spawn import find_executable def which(program): """Check if a command line program is available An Exception is raised if the program is not available. :param program: name or program to check for, e.g. 'wget'. """ path = find_executable(program) if not path: raise Exception('The program %s is not available.' % program) def submit_job(script, extra=''): """Submit a job :param script: contents of the script to run. :param name: name for the job. :param extra: optional extra arguments for the sbatch command. """ which('sbatch') fileobj = create_script(script) # Effect of the arguments for sbatch: # http://slurm.schedmd.com/sbatch.html sbatch = ('sbatch {extra} {script}' .format(script=fileobj.name, extra=extra)) try: result = subprocess.check_output(sbatch, stderr=subprocess.STDOUT, shell=True, timeout=120) logging.info(result) except subprocess.TimeoutExpired as e: logging.error("Process timeout") except Exception as e: logging.exception(e) delete_script(fileobj) def create_script(script): """Create script as temp file to be run on cluster""" fileobj = tempfile.NamedTemporaryFile(delete=True, suffix='.sh', mode='wt', buffering=1) fileobj.write(script) os.chmod(fileobj.name, 0o774) return fileobj def delete_script(fileobj): """Delete script after submitting to cluster :param script_path: path to the script to be removed """ fileobj.close() def get_number_in_queue(host=config.get_hostname(), partition=''): print (len(get_queue(host, partition)), host, partition) return len(get_queue(host, partition)) def get_queue(host=config.get_hostname(), partition=''): """Get list of jobs in queue""" if host == "midway-login1": args = {'partition': 'sandyb', 'user' : config.get_user()} elif host == 'tegner-login-1': args = {'partition': 'main', 'user' : 'bobau'} else: return [] if partition == '': command = 'squeue --user={user} -o "%.30j"'.format(**args) else: args['partition'] = partition command = 'squeue --partition={partition} --user={user} -o "%.30j"'.format(**args) try: queue = subprocess.check_output(command, shell=True, timeout=120) except subprocess.TimeoutExpired as e: logging.error("Process timeout") return [] except Exception as e: logging.exception(e) return [] queue_list = queue.rstrip().decode('ascii').split() if len(queue_list) > 1: return queue_list[1:] return []
Python
0.000003
@@ -2333,16 +2333,18 @@ ''):%0A + # print (
55e67e6c2d230530726c59c11615cbf4e16786fb
Fix RedirectView import
base/views.py
base/views.py
# -*- coding: utf-8 -*- """ This file contains some generic purpouse views """ # standard library # django from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.shortcuts import get_object_or_404 from django.shortcuts import render from django.shortcuts import render_to_response from django.template import RequestContext from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy as _ from django.views.generic.detail import DetailView from django.views.generic.edit import CreateView from django.views.generic.edit import DeleteView from django.views.generic.edit import UpdateView from django.views.generic.list import ListView from django.views.generic.list import RedirectView # utils from base.view_utils import clean_query_string from base.utils import camel_to_underscore @login_required def index(request): """ view that renders a default home""" return render(request, 'index.pug') def bad_request_view(request): return render_to_response('exceptions/400.jade', {}, context_instance=RequestContext(request)) def permission_denied_view(request): return render_to_response('exceptions/403.jade', {}, context_instance=RequestContext(request)) def page_not_found_view(request): return render_to_response('exceptions/404.jade', {}, context_instance=RequestContext(request)) def error_view(request): return render_to_response('exceptions/500.jade', {}, context_instance=RequestContext(request)) class PermissionRequiredMixin: permission_required = None def check_permission_required(self): if self.permission_required: if not self.request.user.has_perm(self.permission_required): raise PermissionDenied class BaseDetailView(DetailView, PermissionRequiredMixin): def get_title(self): verbose_name = self.model._meta.verbose_name return '{}: {}'.format(verbose_name, self.object).capitalize() def get_context_data(self, **kwargs): context = super(BaseDetailView, self).get_context_data(**kwargs) context['opts'] = self.model._meta context['title'] = self.get_title() return context @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseDetailView, self).dispatch(*args, **kwargs) class BaseCreateView(CreateView, PermissionRequiredMixin): def get_context_data(self, **kwargs): context = super(BaseCreateView, self).get_context_data(**kwargs) verbose_name = self.model._meta.verbose_name context['opts'] = self.model._meta context['title'] = _('Create %s') % verbose_name context['cancel_url'] = self.get_cancel_url() return context def get_cancel_url(self): model_name = self.model.__name__.lower() return reverse('{}_list'.format(model_name)) @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseCreateView, self).dispatch(*args, **kwargs) class BaseSubModelCreateView(CreateView, PermissionRequiredMixin): """ Create view when the object is nested within a parent object """ @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseSubModelCreateView, self).dispatch(*args, **kwargs) def get_form_kwargs(self): model_underscore_name = camel_to_underscore(self.parent_model.__name__) obj = get_object_or_404( self.parent_model, pk=self.kwargs['{}_id'.format(model_underscore_name)] ) self.object = self.model(**{model_underscore_name: obj}) return super(BaseSubModelCreateView, self).get_form_kwargs() def get_context_data(self, **kwargs): context = super(BaseSubModelCreateView, self).get_context_data( **kwargs ) model_underscore_name = camel_to_underscore(self.parent_model.__name__) obj = get_object_or_404( self.parent_model, pk=self.kwargs['{}_id'.format(model_underscore_name)] ) context[model_underscore_name] = obj context['title'] = _('Create %s') % self.model._meta.verbose_name context['cancel_url'] = obj.get_absolute_url() return context class BaseListView(ListView, PermissionRequiredMixin): paginate_by = 25 page_kwarg = 'p' def get_ordering(self): """ Return the field or fields to use for ordering the queryset. """ order = self.request.GET.get('_o') if order: return (order,) return self.ordering def get_context_data(self, **kwargs): context = super(BaseListView, self).get_context_data(**kwargs) context['opts'] = self.model._meta context['clean_query_string'] = clean_query_string(self.request) context['q'] = self.request.GET.get('q') context['title'] = self.model._meta.verbose_name_plural.capitalize() return context @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseListView, self).dispatch(*args, **kwargs) class BaseUpdateView(UpdateView, PermissionRequiredMixin): @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseUpdateView, self).dispatch(*args, **kwargs) def get_context_data(self, **kwargs): context = super(BaseUpdateView, self).get_context_data(**kwargs) context['opts'] = self.model._meta context['cancel_url'] = self.object.get_absolute_url() context['title'] = _('Update %s') % str(self.object) return context class BaseDeleteView(DeleteView, PermissionRequiredMixin): @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseDeleteView, self).dispatch(*args, **kwargs) def get_context_data(self, **kwargs): context = super(BaseDeleteView, self).get_context_data(**kwargs) context['opts'] = self.model._meta context['title'] = _('Delete %s') % str(self.object) return context def get_success_url(self): model_name = self.model.__name__.lower() return reverse('{}_list'.format(model_name)) class BaseRedirectView(RedirectView, PermissionRequiredMixin): @method_decorator(login_required) def dispatch(self, *args, **kwargs): self.check_permission_required() return super(BaseRedirectView, self).dispatch(*args, **kwargs)
Python
0
@@ -789,37 +789,32 @@ go.views.generic -.list import Redirect
ddaa9a687019794f61c019c5e3139a0b9ceaf521
enable C++ exception-handling
binding.gyp
binding.gyp
{ "target_defaults": { "conditions": [ ['OS=="win"', { }, { 'cflags' : [ "-fexceptions" ], 'cflags_cc' : [ "-fexceptions" ] } ] ], "configurations": { "Release": { 'msvs_settings': { 'VCCLCompilerTool': { 'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG 'OmitFramePointers': 'true', 'EnableFunctionLevelLinking': 'true', 'EnableIntrinsicFunctions': 'true', 'RuntimeTypeInfo': 'false', 'ExceptionHandling': '1', 'AdditionalOptions': [ '/MP' ] }, 'VCLibrarianTool': { 'AdditionalOptions': [ '/LTCG', # link time code generation ], }, 'VCLinkerTool': { 'LinkTimeCodeGeneration': 1, # link-time code generation 'OptimizeReferences': 2, # /OPT:REF 'EnableCOMDATFolding': 2, # /OPT:ICF 'LinkIncremental': 1, # disable incremental linking } } } } }, "targets": [ { "target_name": "inchi", "msvs_guid": "F1B917E2-75AB-A243-6D62-3C7938A1EF68", "include_dirs": [ "<!(node -e \"require('nan')\")" ], "dependencies": [ "deps/inchi/inchi.gyp:libINCHIAPI" ], "sources": [ "src/node-inchi.cc", "src/molecule.cc", "src/atom.cc", "src/molecule_wrap.cc", "src/molecule_native.cc", "src/inchi_atom.cc", "src/get_inchi_data.cc", "src/get_struct_from_inchi_data.cc", "src/get_inchi.cc" ], "conditions": [ ['OS=="win"', { }, { 'cflags_cc' : [ "-fexceptions" ] } ] ], }, { "target_name": "test", "type": "executable", "sources": [ "src/test/TestMain.cc", "src/test/hello.cc", "src/test/test_molecule.cc", "src/test/test_inchi_atom.cc", "src/test/test_get_struct_from_inchi.cc", "src/molecule_native.cc", "src/get_inchi_data.cc", "src/get_struct_from_inchi_data.cc", "src/inchi_atom.cc" ], "include_dirs": [ ".", "src", "<!(node -e \"require('cppunitlite')\")", "<!(node -e \"require('nan')\")" ], "dependencies": [ "node_modules/cppunitlite/binding.gyp:CppUnitLite", "deps/inchi/inchi.gyp:libINCHIAPI" ], "conditions": [ ['OS=="win"', { }, { 'cflags_cc': [ '-fexceptions' ] } ] ], # sample unit test } ] }
Python
0.000012
@@ -643,16 +643,25 @@ '/MP' +, '/EHsc' %0A
ac6c5799307541c6de646addcbf103da1033a830
Correct html coding error.
smth_worklife_hot.py
smth_worklife_hot.py
#!/usr/bin/env python # coding=utf-8 import requests as rq import random as rd from BeautifulSoup import BeautifulSoup as bs from collections import namedtuple import datetime, time import sys reload(sys) sys.setdefaultencoding('utf-8') Topic = namedtuple('Topic', ['href', 'title', 'published_date', 'num_pages']) HostName = "http://www.newsmth.net" def extract_topic(text): soup = bs(text) topic_table = soup.find('table', {'class':'board-list tiz'}) topics = topic_table.findAll('td', {'class':'title_9'}) print type(topics) for elem in topics: a = elem.find('a') href = a['href'] title = a.string num_pages = 1 if (elem.span): other_pages = elem.span.findAll('a') num_pages += len(other_pages) published_date = elem.nextSibling.string try: timeArray = time.strptime(published_date, "%Y-%m-%d") except: today = datetime.date.today() published_date = today.__str__() yield Topic(href, title, published_date, num_pages) def topics_first_n_page(n): topics = [] headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36', } query_dict = {} url_head = HostName + '/nForum/' r = rq.get(url_head, params = query_dict) r.encoding = 'gbk' url_head = HostName + '/nForum/board/WorkLife?ajax' headers = { 'Host': 'www.newsmth.net', 'Connection': 'keep-alive', 'Accept': '*/*', 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36', 'Referer': 'http://www.newsmth.net/nForum/', 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'zh-CN,zh;q=0.8' } cookies = r.cookies for page_idx in range(1, n + 1): query_dict = {'ajax': None, 'p': page_idx} r = rq.get(url_head, params = query_dict, headers = headers, cookies = cookies) r.encoding = 'gbk' topic_generator = extract_topic(r.text) for topic in topic_generator: topics.append(topic) return topics def render(render_path, topics): f = open(render_path, 'w') def topic_to_tr(topic): format_tr = ''' <tr> <td><a href="%s">%s</a></td> <td>%s</td> <td>%s</td> </tr>''' topic = topic._replace(href = HostName + topic.href) return format_tr % topic def topic_tr_itr(): for topic in topics: topic_tr = topic_to_tr(topic) yield topic_tr # for topic_string in topic_tr_itr(): # f.write(topic_string) table_content = ''.join(topic_tr_itr()) table_templete = ''' <table cellpadding="0" cellspacing="0"> <thead> <tr> <th>主题</th> <th>发帖时间</th> <th>回复页数</th> </tr> </thead> <tbody>%s</tbody> </table>''' table_page = table_templete % table_content page_templete = ''' <!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.0 Transitional//EN'> <html> <head> <meta http-equiv='Content-Type' content='text/html; charset=GB18030'> </head> <body> %s </body> </html>''' page = page_templete % table_page f.write(page) f.close() if __name__ == "__main__": topics = topics_first_n_page(1) render_path = 'worklife.html' render(render_path, topics)
Python
0.000006
@@ -2944,15 +2944,13 @@ set= -GB18030 +utf-8 '%3E%0A%09 @@ -3030,16 +3030,34 @@ le_page%0A +%09print type(page)%0A %09%0A%09f.wri
7998477a627a78b83f96894e72ec2f121c4b9606
Update binding.gyp
binding.gyp
binding.gyp
{ "targets": [ { 'target_name': 'LDAP', 'sources': [ 'src/LDAP.cc' ], 'include_dirs': [ '/usr/local/include' ], 'defines': [ 'LDAP_DEPRECATED' ], 'cflags': [ '-Wall', '-g' ], 'ldflags': [ '-L/usr/local/lib', '-lldap' ], 'conditions': [ ['OS=="linux"', { 'ldflags': [ '-luuid' ] } ], ['OS=="mac"', { "link_settings": { "libraries": [ "-lldap" ] } } ] ] } ] }
Python
0
@@ -273,37 +273,39 @@ %5D,%0A 'l -dflag +ibrarie s': %5B%0A '- @@ -308,41 +308,75 @@ '- -L/usr/local/lib',%0A '-lldap +llber -lldap'%0A %5D,%0A 'ldflags': %5B%0A '-L/usr/local/lib '%0A
a101c98fff343cd283fc5f5ade555d4bf6187f20
Enable build on Windows 64bits.
binding.gyp
binding.gyp
{ 'variables': { 'source_root_dir': '<!(python tools/source_root_dir.py)', 'steamworks_sdk_dir': 'deps/steamworks_sdk', 'target_dir': 'lib' }, 'conditions': [ ['OS=="win"', { 'conditions': [ ['target_arch=="ia32"', { 'variables': { 'project_name': 'greenworks-win32', 'redist_bin_dir': '', 'lib_steam': 'steam_api.lib', 'lib_dll_steam': 'steam_api.dll', }, }], ['target_arch=="x64"', { 'variables': { 'project_name': 'greenworks-win64', 'redist_bin_dir': 'win64', 'lib_steam': 'steam_api64.lib', 'lib_dll_steam': 'steam_api64.dll', }, }], ], }], ['OS=="mac"', { 'conditions': [ ['target_arch=="ia32"', { 'variables': { 'project_name': 'greenworks-osx32', }, }], ['target_arch=="x64"', { 'variables': { 'project_name': 'greenworks-osx64', }, }], ], 'variables': { 'redist_bin_dir': 'osx32', 'lib_steam': 'libsteam_api.dylib' }, }], ['OS=="linux"', { 'conditions': [ ['target_arch=="ia32"', { 'variables': { 'project_name': 'greenworks-linux32', 'redist_bin_dir': 'linux32', 'lib_steam': 'libsteam_api.so' } }], ['target_arch=="x64"', { 'variables': { 'project_name': 'greenworks-linux64', 'redist_bin_dir': 'linux64', 'lib_steam': 'libsteam_api.so' } }], ], }], ], 'targets': [ { 'target_name': '<(project_name)', 'sources': [ 'src/greenworks_api.cc', 'src/greenworks_async_workers.cc', 'src/greenworks_async_workers.h', 'src/greenworks_workshop_workers.cc', 'src/greenworks_workshop_workers.h', 'src/greenworks_utils.cc', 'src/greenworks_utils.h', 'src/greenworks_unzip.cc', 'src/greenworks_unzip.h', 'src/greenworks_zip.cc', 'src/greenworks_zip.h', 'src/steam_async_worker.cc', 'src/steam_async_worker.h', ], 'include_dirs': [ 'deps', '<(steamworks_sdk_dir)/public', '<!(node -e "require(\'nan\')")' ], 'dependencies': [ 'deps/zlib/zlib.gyp:minizip' ], 'link_settings': { 'libraries': [ '<(source_root_dir)/<(steamworks_sdk_dir)/redistributable_bin/<(redist_bin_dir)/<(lib_steam)' ] }, 'conditions': [ ['OS== "linux"', { 'ldflags': [ '-Wl,-rpath,\$$ORIGIN', ], }, ], # For zlib.gyp::minizip library. ['OS=="mac" or OS=="ios" or OS=="android"', { # Mac, Android and the BSDs don't have fopen64, ftello64, or # fseeko64. We use fopen, ftell, and fseek instead on these # systems. 'defines': [ 'USE_FILE32API' ], }], ], 'xcode_settings': { 'WARNING_CFLAGS': [ '-Wno-deprecated-declarations', ], }, }, { 'target_name': 'copy_binaries', 'type': 'none', 'actions': [ { 'action_name': 'Copy Binaries', 'variables': { 'conditions': [ ['OS=="win"', { 'lib_steam_path': '<(source_root_dir)/<(steamworks_sdk_dir)/redistributable_bin/<(redist_bin_dir)/<(lib_dll_steam)', }], ['OS=="mac" or OS=="linux"', { 'lib_steam_path': '<(source_root_dir)/<(steamworks_sdk_dir)/redistributable_bin/<(redist_bin_dir)/<(lib_steam)', }], ] }, 'inputs': [ '<(lib_steam_path)', '<(PRODUCT_DIR)/<(project_name).node' ], 'outputs': [ '<(target_dir)', ], 'action': [ 'python', 'tools/copy_binaries.py', '<@(_inputs)', '<@(_outputs)', ], } ], }, ] }
Python
0
@@ -2759,32 +2759,179 @@ %7D,%0A %5D,%0A + %5B'OS== %22win%22 and target_arch==%22x64%22',%0A %7B%0A 'defines': %5B%0A '_AMD64_',%0A %5D,%0A %7D,%0A %5D,%0A # For zl
996c8d4e9a65f411341f0c5f349ff3788cca0209
Use unittest assertions.
rinse/tests/test_client.py
rinse/tests/test_client.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Unit tests for rinse.client module.""" import unittest from lxml import etree from mock import MagicMock, patch from rinse.client import SoapClient from rinse.message import SoapMessage from .utils import captured_stdout class TestSoapMessage(unittest.TestCase): def test_soap_action(self): """Test that SOAP-Action HTTP header is set correctly.""" msg = SoapMessage(etree.Element('test')) req = msg.request('http://example.com', 'testaction') self.assertEqual(req.headers['SOAPAction'], 'testaction') def test_no_soap_action(self): """Test that SOAP-Action HTTP header is absent when no action given. """ msg = SoapMessage(etree.Element('test')) req = msg.request('http://example.com') self.assertTrue('SOAPAction' not in req.headers) def test_soap_action_is_none(self): """Test that SOAP-Action HTTP header is absent when no action is None. """ msg = SoapMessage(etree.Element('test')) req = msg.request('http://example.com', None) self.assertTrue('SOAPAction' not in req.headers) class TestRinseClient(unittest.TestCase): def test_soap_action(self): """Test that SOAP action is passed on to SoapMessage.request().""" msg = SoapMessage(etree.Element('test')) msg.request = MagicMock() with patch('requests.Session'): client = SoapClient('http://example.com') client(msg, 'testaction', build_response=lambda r: r) msg.request.assert_called_once_with('http://example.com', 'testaction') def test_soap_action_debug(self): msg = SoapMessage(etree.Element('test')) client = SoapClient('http://example.com', debug=True) client._session = MagicMock() with captured_stdout() as stdout: client(msg, 'testaction', build_response=lambda r: r) self.assertEqual( stdout.getvalue(), 'POST http://example.com\n' 'Content-Length: 164\n' 'Content-Type: text/xml;charset=UTF-8\n' 'SOAPAction: testaction\n' '\n' '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">\n' ' <soapenv:Header/>\n' ' <soapenv:Body>\n' ' <test/>\n' ' </soapenv:Body>\n' '</soapenv:Envelope>\n' '\n' ) def test_no_soap_action(self): """Test that empty SOAP action is passed to SoapMessage.request() when no action given.""" msg = SoapMessage(etree.Element('test')) msg.request = MagicMock() with patch('requests.Session'): client = SoapClient('http://example.com') client(msg, build_response=lambda r: r) msg.request.assert_called_once_with('http://example.com', '') def test_timeout(self): msg = SoapMessage(etree.Element('test')) msg.request = MagicMock() client = SoapClient('http://example.com', timeout=1) assert client.timeout == 1 with patch('requests.Session'): client(msg, 'testaction', build_response=lambda r: r) assert client._session.send.call_args[1]['timeout'] == 1 with patch('requests.Session'): client(msg, 'testaction', build_response=lambda r: r, timeout=2) assert client._session.send.call_args[1]['timeout'] == 2 with patch('requests.Session'): client(msg, 'testaction', build_response=lambda r: r, timeout=None) assert client._session.send.call_args[1]['timeout'] is None if __name__ == '__main__': unittest.main()
Python
0
@@ -1156,16 +1156,53 @@ eaders)%0A + self.assertTrue(msg.etree())%0A %0A%0Aclass @@ -3170,31 +3170,41 @@ =1)%0A +self. assert - +Equal( client.timeo @@ -3205,21 +3205,20 @@ .timeout - == +, 1 +) %0A%0A @@ -3321,39 +3321,49 @@ r)%0A +self. assert - +Equal( client._session. @@ -3394,13 +3394,12 @@ ut'%5D - == +, 1 +) %0A%0A @@ -3517,39 +3517,49 @@ =2)%0A +self. assert - +Equal( client._session. @@ -3590,13 +3590,12 @@ ut'%5D - == +, 2 +) %0A%0A @@ -3728,15 +3728,25 @@ +self. assert - +Equal( clie @@ -3785,24 +3785,23 @@ imeout'%5D - is +, None +) %0A%0A%0Aif __
1d9808071e436d306f236856efe837a33f00fa96
Put keyboard visualizations on the rotary switch
gui-supervisor.py
gui-supervisor.py
#!/usr/bin/env python """Master program for the Organ Donor "Organelle" This program is mainly responsible for monitoring the physical rotary switch that allows users to select a major mode of operation for the Organelle. When it detects that the switch has been moved, it asks the current program to clean up and exit (by sending it a SIGUSR1 signal), waits for this to complete, and launches the newly selected program. Because Python GUI programs on the Raspberry Pi take a while to launch in the best case, there is no way the rotary switch can be super responsive. We'll have to settle for predictable but slow behavior. When the switch position starts to change, we'll go ahead and signal the running program, and continue to monitor the switch position. Once the switch reading has been steady for a while, we will launch the new program (which might be the same as the old program). 2015-05-25 Paul Williamson 2015-08-24 ptw Reconstructing lost work: actually launch programs and signal them, and don't use the screen. Added syslog logging. 2015-08-25 ptw Adjusted pin number to match as-built configuration. """ # If we're running on Raspbian Jessie, we can use GPIO without being root! # Otherwise, must run as root to use the built-in GPIO package. import RPi.GPIO as GPIO import sys, time import subprocess import syslog import signal syslog.openlog("organelle") syslog.syslog(syslog.LOG_INFO, "Organelle GUI supervisor started") switch_steady_delay = 1.0 # seconds before the switch is considered stable proc_exit_delay = 1.0 # seconds to allow the process to exit # Pin numbers will follow the Broadcom SoC pin numbering GPIO.setmode(GPIO.BCM) # Mapping of pins onto programs and their command-line arguments programs = { 4: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:0 MIDIPLUS_1"), 17: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:1 MIDIPLUS_2"), 27: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:2 MIDIPLUS_3"), 22: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:3 MIDIPLUS_4"), 5: ("xterm", "-fullscreen -e ./organelle.py 4"), # keyboards only 6: ("xterm", "-fullscreen -e ./organelle.py 4"), 13: ("xterm", "-fullscreen -e ./organelle.py 4"), 19: ("xterm", "-fullscreen -e ./jukebox.py 5"), # auto-play 26: ("xterm", "-fullscreen -e ./organelle.py t"), # theremin 23: ("xterm", "-fullscreen -e ./organelle.py g") # pitch game } # Extract the list of GPIO pins from the program mapping. pins = programs.keys() # Function that reads all the pins into a dictionary. def rotary_switch(): return {x : GPIO.input(x) for x in pins} # Given a dictionary levels containing the pin levels, # and hoping that exactly one of them is 0 (because it's a rotary switch), # return the pin number of the first one that's 0. # If somehow none of the pins are grounded, return None. def selected(levels): for pin,val in levels.iteritems(): if val == 0: return pin return None # Display a prompt in case the screen is unclaimed long enough to matter. #def prompt(): # sys.stderr.write("\x1b[2J\x1b[10;1H") # row 10, column 1 # print "Select mode using rotary switch" def prompt(): # we need not do anything to prompt; the wallpaper is the prompt. pass # Set all pins as inputs with pullup, so we just ground a pin to activate. for p in pins: GPIO.setup(p, GPIO.IN, pull_up_down=GPIO.PUD_UP) # The rotary switch monitoring goes on forever ... while True: prompt() # Here we are in between programs. Wait for a constant switch reading. levels = rotary_switch() waitfor = time.time() + switch_steady_delay while time.time() < waitfor: newlevels = rotary_switch() if newlevels != levels: levels.update(newlevels) waitfor = time.time() + switch_steady_delay # OK, the switch has been steady for long enough. Launch that program! choice = selected(levels) if choice is None: continue (prog,arg) = programs[choice] # dummy launch for testing #print "Here we launch %s %s" % (prog,arg) proc = subprocess.Popen([prog]+arg.split()) if not proc: syslog.syslog(syslog.LOG_ERROR, "Failed to launch " + prog + " " + arg) continue syslog.syslog(syslog.LOG_INFO, "Launched " + prog + " " + arg) # Program is running. Continue watching the rotary switch for changes. while levels == rotary_switch(): time.sleep(0.100) # Switch touched! Ask the program to exit and wait for it to do so. proc.send_signal(signal.SIGUSR1) proc.wait() # waitfor = time.time() + proc_exit_delay # while time.time() < waitfor: # if proc.poll(): # syslog.syslog(syslog.LOG_INFO, "Normal exit") # break # time.sleep(0.100) # if not proc.poll(): # # uh oh, program didn't exit as requested. Terminate with prejudice. # syslog.syslog(syslog.LOG_ERR, "Program failed to exit on request!") # proc.kill() # proc.wait() # if kill() doesn't work, we're hung too.
Python
0.000001
@@ -2189,111 +2189,85 @@ : (%22 -xterm%22, %22-fullscreen -e ./organelle.py 4%22),%0A 13: (%22xterm%22, %22-fullscreen -e ./organe +./gui-play-keyboards.py%22, %22%22),%0A 13: (%22./gui-play-waterfa ll -e .py - 4 +%22, %22 %22),%0A @@ -2289,49 +2289,34 @@ : (%22 -xterm%22, %22-fullscreen -e ./jukebox.py 5%22), +./autoplay.py%22, %22%22),%09%09%09%09%09%09 %09%09#
c6fefc19b0bb64c19d85cce0750c3d94fb608d76
Fix path building for csv source files in team app.
django_football/teams/utils.py
django_football/teams/utils.py
import os import csv import json import pickle import logging from random import choice, randint from django.core.exceptions import ObjectDoesNotExist import python_football from .models import Playbook, City, Nickname, Team from people import names from people.models import Coach, Player from teams.models import get_draft_position_order ## Initialization Functions def create_playbook(): playbook = Playbook(name='Basic', plays=json.dumps(pickle.dumps(python_football.new_playbook()))) playbook.save() def initialize_cities(): cities = [] with open(os.path.join('django_football','teams','csv_source_files','metroareas.csv'), 'r') as cities_file: cities_reader = csv.reader(cities_file,delimiter=',') for city in cities_reader: cities.append(City(name=city[0], state = city[1], pro = bool(int(city[2])), semipro = bool(int(city[3])), amateur = bool(int(city[4])), region = city[5], division = city[6], ) ) City.objects.bulk_create(cities) def initialize_nicknames(): nicknames = [] with open(os.path.join('django_football','teams','csv_source_files','nicknames.csv'), 'r') as nicknames_file: nickname_reader = csv.reader(nicknames_file,delimiter=',') for nickname in nickname_reader: nicknames.append(Nickname(name=nickname[0], pro = bool(int(nickname[1])), semipro = bool(int(nickname[2])) ) ) Nickname.objects.bulk_create(nicknames) # TODO investigate better way of testing presence of data def initialize_team_source_data(): try: Playbook.objects.get(id=1) except ObjectDoesNotExist: create_playbook() try: City.objects.get(id=1) except ObjectDoesNotExist: initialize_cities() try: Nickname.objects.get(id=1) except ObjectDoesNotExist: initialize_nicknames() ## Universe Creation Functions def determine_number_pro_teams(universe): position_counts=[] for position in ['qb', 'rb', 'wr', 'og', 'c', 'ot', 'dt', 'de', 'lb', 'cb', 's', 'k', 'p']: position_counts.append(Player.objects.filter(universe=universe, position=position.upper(), age__gte=23, ratings__gte=70).count()) return sum(position_counts) / len(position_counts) def create_initial_universe_teams(universe, level): logger = logging.getLogger('django.request') number_teams = determine_number_pro_teams(universe) if level == 'any': cities = City.objects.all() nicknames = Nickname.objects.all() elif level in ['pro', 'semipro', 'amateur']: level_filter = {} level_filter[level] = True cities = City.objects.filter(**level_filter) nicknames = Nickname.objects.filter(**level_filter) else: return HttpResponse("Invalid level for team creation.") coaches = [Coach(universe=universe, first_name=names.first_name(), last_name=names.last_name(), skill=randint(60,90), play_probabilities = json.dumps({}), fg_dist_probabilities = json.dumps({}) ) for x in xrange(int(number_teams))] for coach in coaches: coach.save() teams = [Team(universe=universe, city=choice(cities), nickname=choice(nicknames), human_control=False, home_field_advantage=randint(1,3), draft_position_order = get_draft_position_order(), coach = coaches.pop(), playbook = Playbook.objects.get(id=1)) for x in xrange(int(number_teams))] Team.objects.bulk_create(teams) logger.info('{0} teams created in universe {1}'.format(number_teams, universe.name))
Python
0
@@ -171,16 +171,53 @@ otball%0A%0A +from settings.base import SITE_ROOT%0A%0A from .mo @@ -259,16 +259,16 @@ e, Team%0A - from peo @@ -375,16 +375,86 @@ _order%0A%0A +CSV_SOURCE_DIR = os.path.join(SITE_ROOT, 'teams', 'csv_source_files')%0A %0A## Init @@ -717,53 +717,24 @@ oin( -'django_football','teams','csv_source_files', +CSV_SOURCE_DIR, 'met @@ -1416,53 +1416,24 @@ oin( -'django_football','teams','csv_source_files', +CSV_SOURCE_DIR, 'nic
182c2ea095dc5207b6d66ce1ad8e2ad2dc986da2
Fix test skipper
skimage/_shared/tests/test_testing.py
skimage/_shared/tests/test_testing.py
""" Testing decorators module """ import numpy as np from nose.tools import (assert_true, assert_raises, assert_equal) from skimage._shared.testing import doctest_skip_parser, test_parallel def test_skipper(): def f(): pass class c(): def __init__(self): self.me = "I think, therefore..." docstring = \ """ Header >>> something # skip if not HAVE_AMODULE >>> something + else >>> a = 1 # skip if not HAVE_BMODULE >>> something2 # skip if HAVE_AMODULE """ f.__doc__ = docstring c.__doc__ = docstring global HAVE_AMODULE, HAVE_BMODULE HAVE_AMODULE = False HAVE_BMODULE = True f2 = doctest_skip_parser(f) c2 = doctest_skip_parser(c) assert_true(f is f2) assert_true(c is c2) assert_equal(f2.__doc__, """ Header >>> something # doctest: +SKIP >>> something + else >>> a = 1 >>> something2 """) assert_equal(c2.__doc__, """ Header >>> something # doctest: +SKIP >>> something + else >>> a = 1 >>> something2 """) HAVE_AMODULE = True HAVE_BMODULE = False f.__doc__ = docstring c.__doc__ = docstring f2 = doctest_skip_parser(f) c2 = doctest_skip_parser(c) assert_true(f is f2) assert_equal(f2.__doc__, """ Header >>> something >>> something + else >>> a = 1 # doctest: +SKIP >>> something2 # doctest: +SKIP """) assert_equal(c2.__doc__, """ Header >>> something >>> something + else >>> a = 1 # doctest: +SKIP >>> something2 # doctest: +SKIP """) del HAVE_AMODULE f.__doc__ = docstring c.__doc__ = docstring assert_raises(NameError, doctest_skip_parser, f) assert_raises(NameError, doctest_skip_parser, c) def test_test_parallel(): state = [] @test_parallel() def change_state1(): state.append(None) change_state1() assert len(state) == 2 @test_parallel(num_threads=1) def change_state2(): state.append(None) change_state2() assert len(state) == 3 @test_parallel(num_threads=3) def change_state3(): state.append(None) change_state3() assert len(state) == 6 if __name__ == '__main__': np.testing.run_module_suite()
Python
0.000003
@@ -362,32 +362,36 @@ Header%0A%0A + %3E%3E%3E something # @@ -407,32 +407,36 @@ ot HAVE_AMODULE%0A + %3E%3E%3E some @@ -448,32 +448,36 @@ + else%0A + %3E%3E%3E a = 1 # skip @@ -493,24 +493,28 @@ AVE_BMODULE%0A + %3E%3E%3E @@ -825,42 +825,21 @@ -assert_equal(f2.__doc__,%0A +expected = %5C%0A @@ -858,37 +858,32 @@ er%0A%0A - %3E%3E%3E something # @@ -901,37 +901,32 @@ KIP%0A - - %3E%3E%3E something + @@ -934,37 +934,32 @@ lse%0A - %3E%3E%3E a = 1%0A @@ -956,37 +956,32 @@ = 1%0A - - %3E%3E%3E something2%0A @@ -983,37 +983,27 @@ ng2%0A - - %22%22%22 -) %0A assert_ @@ -1000,33 +1000,33 @@ assert_equal( -c +f 2.__doc__,%0A @@ -1023,203 +1023,56 @@ c__, -%0A %22%22%22 Header%0A%0A %3E%3E%3E something # doctest: +SKIP%0A %3E%3E%3E something + else%0A %3E%3E%3E a = 1%0A %3E%3E%3E something2%0A %22%22%22 + expected)%0A assert_equal(c2.__doc__, expected )%0A%0A @@ -1269,42 +1269,21 @@ -assert_equal(f2.__doc__,%0A +expected = %5C%0A @@ -1302,37 +1302,32 @@ er%0A%0A - %3E%3E%3E something%0A @@ -1328,37 +1328,32 @@ ing%0A - - %3E%3E%3E something + @@ -1349,37 +1349,32 @@ omething + else%0A - %3E%3E%3E @@ -1400,37 +1400,32 @@ KIP%0A - - %3E%3E%3E something2 @@ -1450,29 +1450,19 @@ - - %22%22%22 -) %0A ass @@ -1463,33 +1463,33 @@ assert_equal( -c +f 2.__doc__,%0A @@ -1486,222 +1486,56 @@ c__, -%0A %22%22%22 Header%0A%0A %3E%3E%3E something%0A %3E%3E%3E something + else%0A %3E%3E%3E a = 1 # doctest: +SKIP%0A %3E%3E%3E something2 # doctest: +SKIP%0A %22%22%22 + expected)%0A assert_equal(c2.__doc__, expected )%0A%0A
01bee58d06a2af3f94e3a1be954abd845da52ba1
Update language.py
language_detection/language.py
language_detection/language.py
import pickle import os from sklearn.decomposition import TruncatedSVD from sklearn.metrics import pairwise class language_detection: def __init__(self): # ''' Constructor for this class. ''' __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) self.svd=pickle.load(open(__location__+"//data//svd.MODEL",'rb')) self.vocabulary=pickle.load(open(__location__+"//data//lexicon.MODEL",'rb')) self.id_of_languages=pickle.load(open(__location__+"//data//language_id.MODEL",'rb')) self.svdmatrix=pickle.load(open(__location__+"//data//svdmatrix.MODEL",'rb')) def language_list(self): lan_list=[] for item in self.id_of_languages.keys(): lan_list.append((self.id_of_languages[item]['id'],self.id_of_languages[item]['name'])) return lan_list def index_finder(self,text): clean_text=self.clean(text) n_gram=self.ngram_extractor(clean_text) matrix=self.ngram_to_matrix(n_gram) svd_matrix=self.svd_transform(matrix) index=self.detect_similarity(svd_matrix) return index def langauge_id(self,text): index=self.index_finder(text) print "The Language ID is: "+self.id_of_languages[index]["id"] return self.id_of_languages[index]["id"] def langauge_name(self,text): index=self.index_finder(text) print "The Language Name is: "+self.id_of_languages[index]["name"] return self.id_of_languages[index]["name"] def clean(self,text): try: clean_text=text.decode("utf8").lower() except: clean_text=text.lower() clean_text=clean_text.replace(" ","") return clean_text def ngram_extractor(self,text): n_gram_list=[] for i in range(len(text)-3): n_gram_list.append(text[i:i+4]) return list(set(n_gram_list)) def ngram_to_matrix(self,n_gram): matrix=[0]*len(self.vocabulary) for gram in n_gram: try: position=self.vocabulary[gram] matrix[position]=1 except: pass return matrix def svd_transform(self,matrix): return self.svd.transform([matrix]) def detect_similarity(self,svd_matrix): ind=0 max_sim=-1 sim=pairwise.cosine_similarity(self.svdmatrix,svd_matrix) for i in range(len(sim)): if(sim[i]>max_sim): max_sim=sim[i] ind=i return ind
Python
0.000001
@@ -1183,18 +1183,18 @@ def lang -a u +a ge_id(se @@ -1383,10 +1383,10 @@ lang -a u +a ge_n
2a65b1715e469e11ed73faf7f3446f81c836c42e
Add fetch domain logic
handler/domain.py
handler/domain.py
#!/usr/bin/python # -*- coding:utf-8 -*- # Powered By KK Studio # Domain Page from BaseHandler import BaseHandler from tornado.web import authenticated as Auth class IndexHandler(BaseHandler): @Auth def get(self): self.render('domain/index.html') class GroupHandler(BaseHandler): @Auth def get(self): self.render('domain/group.html') class RecordHandler(BaseHandler): @Auth def get(self): self.render('domain/record.html')
Python
0.000001
@@ -154,16 +154,64 @@ as Auth%0A +from model.models import Domain, Groups, Record%0A %0A%0Aclass @@ -258,32 +258,236 @@ def get(self):%0A + page = int(self.get_argument('page', 1))%0A line = int(self.get_argument('line', 20))%0A offset = (page - 1) * line%0A data = self.db.query(Domain).offset(offset).limit(line).all()%0A self.ren @@ -505,24 +505,34 @@ /index.html' +,data=data )%0A%0A%0Aclass Gr
c7412824c1e9edb7c386f111ce30b5d76952f861
Remove 'reviews' from Context API return
mendel/serializers.py
mendel/serializers.py
from .models import Keyword, Category, Document, Context, Review, User from rest_auth.models import TokenModel from rest_framework import serializers class KeywordSerializer(serializers.ModelSerializer): class Meta: model = Keyword fields = ('id', 'name', 'definition') def create(self, validated_data): instance, _ = Keyword.objects.get_or_create(**validated_data) return instance class CategorySerializer(serializers.ModelSerializer): class Meta: model = Category fields = ('id', 'name', 'description') class ContextSerializer(serializers.ModelSerializer): keyword_given = KeywordSerializer() user_reviews = serializers.SerializerMethodField('get_reviews') def get_reviews(self, obj): results = Review.objects.filter(user=self.context['request'].user) return ReviewSerializer(results, many=True).data class Meta: model = Context fields = ('id', 'position_from', 'position_to', 'text', 'document', 'keyword_given', 'next_context_id', 'prev_context_id', 'reviews', 'user_reviews') depth = 1 class DocumentSerializer(serializers.ModelSerializer): class Meta: model = Document fields = ('__all__') class ReviewSerializer(serializers.ModelSerializer): class Meta: model = Review fields = ('__all__') class UserSerializer(serializers.ModelSerializer): last_context_id = serializers.SerializerMethodField('return_last_context_id') def return_last_context_id(self, user): try: return Review.objects.filter(user=user.id).latest('created').context.id except: return Context.objects.first().id if Context.objects.first() else None class Meta: model = User fields = ('id', 'username', 'is_staff', 'last_context_id') class TokenSerializer(serializers.ModelSerializer): user = UserSerializer() class Meta: model = TokenModel fields = ('key','user',) depth = 1
Python
0.000006
@@ -1072,19 +1072,8 @@ id', - 'reviews', 'us
f471441bde9940e46badd0ec506c18e8587de004
Optimize the rebuild admin
metaci/build/admin.py
metaci/build/admin.py
from django.contrib import admin from metaci.build.models import Build from metaci.build.models import BuildFlow from metaci.build.models import FlowTask from metaci.build.models import Rebuild class BuildAdmin(admin.ModelAdmin): list_display = ( 'repo', 'plan', 'branch', 'commit', 'status', 'time_queue', 'time_start', 'time_end', ) list_filter = ('repo', 'plan') list_select_related = ('branch','repo','plan') raw_id_fields = ('branch', 'plan', 'repo', 'org', 'org_instance', 'current_rebuild') admin.site.register(Build, BuildAdmin) class BuildFlowAdmin(admin.ModelAdmin): list_display = ( 'build', 'status', 'time_queue', 'time_start', 'time_end', ) list_filter = ('build__repo', 'build') admin.site.register(BuildFlow, BuildFlowAdmin) class FlowTaskAdmin(admin.ModelAdmin): list_display = ('id', 'build_flow', 'stepnum', 'path', 'status') list_filter = ('build_flow__build__repo',) raw_id_fields = ['build_flow'] admin.site.register(FlowTask, FlowTaskAdmin) class RebuildAdmin(admin.ModelAdmin): list_display = ( 'build', 'user', 'status', 'time_queue', 'time_start', 'time_end', ) list_filter = ('build__repo', 'build') admin.site.register(Rebuild, RebuildAdmin)
Python
0.000003
@@ -1325,32 +1325,84 @@ d__repo', 'build +__plan')%0A raw_id_fields = ('build', 'org_instance ')%0Aadmin.site.re
206f76026504219ed52f2fcca1b6b64b78bdcf21
Add some print statements
software/lightpowertool/csv_export.py
software/lightpowertool/csv_export.py
import csv class CSVExport(object): """docstring for CSVExport""" def __init__(self, filename): super(CSVExport, self).__init__() self._filename = filename def export_data(self, data): with open(self._filename, "w", newline='') as csvfile: csvwriter = csv.writer(csvfile, delimiter=',') for single_data in data: csvwriter.writerow(list(single_data))
Python
0.006669
@@ -208,16 +208,66 @@ data):%0A + print(%22Beginning exportation of data...%22)%0A @@ -471,8 +471,52 @@ _data))%0A + print(%22Exportation has been done!%22)%0A
3121a02c6174a31b64974d57a3ec2d7df760a7ae
Ajoute une référence législative au taux d'incapacité
openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py
openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py
# -*- coding: utf-8 -*- from openfisca_france.model.base import * class taux_capacite_travail(Variable): value_type = float default_value = 1.0 entity = Individu label = u"Taux de capacité de travail, appréciée par la commission des droits et de l'autonomie des personnes handicapées (CDAPH)" definition_period = MONTH class taux_incapacite(Variable): value_type = float entity = Individu label = u"Taux d'incapacité" definition_period = MONTH
Python
0.000012
@@ -458,28 +458,323 @@ definition_period = MONTH%0A + reference = %22https://www.legifrance.gouv.fr/affichCodeArticle.do;jsessionid=BD54F4B28313142C87FC8B96013E0441.tplgfr44s_1?idArticle=LEGIARTI000023097719&cidTexte=LEGITEXT000006073189&dateTexte=20190312%22%0A documentation = %22Taux d'incapacit%C3%A9 retenu pour l'Allocation Adulte Handicap%C3%A9 (AAH).%22%0A
a279cb4340c6da5ed64b39660cfcb5ef53d0bb74
Fix test
tests/core/test_node.py
tests/core/test_node.py
from common import auth_check def test_node_fields(mc): cclient = mc.client fields = { 'nodeTaints': 'r', 'nodeLabels': 'r', 'nodeAnnotations': 'r', 'namespaceId': 'cr', 'conditions': 'r', 'allocatable': 'r', 'capacity': 'r', 'hostname': 'r', 'info': 'r', 'ipAddress': 'r', 'limits': 'r', 'nodePoolUuid': 'r', 'nodeName': 'r', 'requested': 'r', 'clusterId': 'cr', 'etcd': 'cru', 'controlPlane': 'cru', 'worker': 'cru', 'requestedHostname': 'cr', 'volumesAttached': 'r', 'nodeTemplateId': 'cr', 'volumesInUse': 'r', 'podCidr': 'r', 'name': 'cru', 'taints': 'ru', 'unschedulable': 'ru', 'providerId': 'r', 'sshUser': 'r', 'imported': "cru", } for name, field in cclient.schema.types['node'].resourceFields.items(): if name.endswith("Config"): fields[name] = 'cr' fields['customConfig'] = 'cru' auth_check(cclient.schema, 'node', 'crud', fields)
Python
0.000004
@@ -502,25 +502,24 @@ 'etcd': 'cr -u ',%0A ' @@ -532,25 +532,24 @@ lPlane': 'cr -u ',%0A ' @@ -556,25 +556,24 @@ worker': 'cr -u ',%0A '
7b67ae8910b90dda49d370dd95fb5969a9a5d16b
Fix restrict_quadrants and add all 4 corners
cell.py
cell.py
from __future__ import division import collections import itertools import math Spoke = collections.namedtuple('Spoke', 'start, end') def get_neighbours(cell, include_self=False): """ Get 8 neighbouring cell coords to a start cell. If `include_self` is True, returns the current (center) cell as well. """ offsets = list(itertools.product([0, 1, -1], repeat=2)) if not include_self: del offsets[offsets.index((0, 0))] # Don't include start cell return [(cell[0] + dx, cell[1] + dy) for dx, dy in offsets] def restrict_quadrants(neighbours, start, end): if end[0] > start[0]: cells = [x for x in neighbours if x[0] >= start[0]] if end[1] > start[1]: cells = [x for x in neighbours if x[1] >= start[1]] return cells def right_intersection(point, line): """ Determine the point at which a point is closest to a line A line through that point would intersect at a right angle. """ if line.start[1] == line.end[1]: # line is horizontal (same y values) return (point[0], line.start[1]) elif line.start[0] == line.end[0]: # line is vertical (same x values) return (line.start[0], point[1]) m = (line.end[1] - line.start[1]) / (line.end[0] - line.start[0]) # slope b = line.start[1] - m * line.start[0] # y-intercept c = point[1] + point[0] / m # y-intercept of intersecting line x = m * (c - b) / (m ** 2 + 1) # x-coord of intersection y = m * x + b # y-coord of intersection return (x, y) def point_distance(start, end, squared=False): """ Calculate distance between two points using Pythagorean theorem. """ d_squared = (end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2 if squared: return d_squared else: return math.sqrt(d_squared) def discretize_line(start, end): """ Turn start and end points (which are integer (x, y) tuples) into a list of integer (x, y) points forming a line. """ max_length = abs(end[1] - start[1]) + abs(end[0] - start[0]) Line = collections.namedtuple('Line', 'start, end') line = Line(start, end) print(line) results = [start] seen = set() while start != end: neighbours = get_neighbours(start) neighbours = restrict_quadrants(neighbours, start, end) print('\nnext round') print(neighbours) next_cell = None min_distance = float('inf') for cell in neighbours: if cell in seen: # Don't go backwards continue intersection = right_intersection(cell, line) distance = point_distance(cell, intersection) print(cell, distance) if distance < min_distance: min_distance = distance next_cell = cell results.append(next_cell) if len(results) > max_length: # Failed! return None seen.add(next_cell) start = next_cell return results
Python
0.000001
@@ -587,24 +587,50 @@ tart, end):%0A + cells = neighbours%5B:%5D%0A if end%5B0 @@ -667,33 +667,111 @@ %5Bx for x in -neighbour +cells if x%5B0%5D %3E= start%5B0%5D%5D%0A elif end%5B0%5D %3C start%5B0%5D:%0A cells = %5Bx for x in cell s if x%5B0%5D %3E= @@ -760,33 +760,33 @@ n cells if x%5B0%5D -%3E +%3C = start%5B0%5D%5D%0A @@ -835,25 +835,103 @@ or x in -neighbour +cells if x%5B1%5D %3E= start%5B1%5D%5D%0A elif end%5B1%5D %3C start%5B1%5D:%0A cells = %5Bx for x in cell s if x%5B1 @@ -924,33 +924,33 @@ n cells if x%5B1%5D -%3E +%3C = start%5B1%5D%5D%0A%0A
1bf3e893e45e0dc16e2e820f5f073a63600217c3
Fix errors in PeriodicFilter
robotpy_ext/misc/periodic_filter.py
robotpy_ext/misc/periodic_filter.py
import logging import wpilib class PeriodicFilter: """ Periodic Filter to help keep down clutter in the console. Simply add this filter to your logger and the logger will only print periodically. The logger will always print logging levels of WARNING or higher """ def __init__(self, period, bypassLevel=logging.WARN): ''' :param period: Wait period (in seconds) between logs :param bypassLevel: Lowest logging level that the filter should ignore ''' self.period = period self.loggingLoop = True self._last_log = -period self.bypassLevel = bypassLevel def filter(self, record): """Performs filtering action for logger""" self._refresh_logger() return self.parent.loggingLoop or record.levelno >= self.bypassLevel def _refresh_logger(self): """Determine if the log wait period has passed""" now = wpilib.Timer.getFPGATimestamp() self.loggingLoop = False if now - self.__last_log > self.logging_interval: self.loggingLoop = True self.__last_log = now
Python
0.000119
@@ -15,22 +15,20 @@ %0Aimport -wpilib +time %0A%0A%0Aclass @@ -538,16 +538,17 @@ self. +_ period = @@ -560,32 +560,33 @@ od%0A self. +_ loggingLoop = Tr @@ -630,24 +630,25 @@ self. +_ bypassLevel @@ -798,15 +798,9 @@ elf. -parent. +_ logg @@ -833,16 +833,17 @@ %3E= self. +_ bypassLe @@ -954,37 +954,22 @@ w = -wpilib.Timer.getFPGATimestamp +time.monotonic ()%0A @@ -972,32 +972,33 @@ ()%0A self. +_ loggingLoop = Fa @@ -1020,25 +1020,24 @@ now - self._ -_ last_log %3E s @@ -1044,24 +1044,15 @@ elf. -logging_interval +_period :%0A @@ -1066,16 +1066,17 @@ self. +_ loggingL @@ -1104,17 +1104,16 @@ self._ -_ last_log
881b5c56e89fb2fdb7d4af3a9ec5c5044a25b878
declare dummy functions
ansible/lib/modules/rally/library/test.py
ansible/lib/modules/rally/library/test.py
from ansible.module_utils.basic import * DOCUMENTATION = ''' --- module: rally short_description: Executes rally commands ''' def main(): fields = { "scenario_file" : {"required": True, "type": "str"}, "scenario_args" : {"required" : False, "type": "str"}, } commands = {'create_db', 'create_deployment', 'check_deployment', 'start_task' } module = AnsibleModule(argument_spec={}) response = {"hello": "world"} module.exit_json(changed=False, meta=response) if __name__ == '__main__': main()
Python
0.000034
@@ -370,11 +370,27 @@ ask' + , 'task_report' %7D%0A - @@ -519,39 +519,199 @@ se)%0A - %0Aif __name__ == '__main__': +def create_db:%0A%09pass%0A%0Adef create_deployment:%0A%09pass%0A%0Adef check_deployment:%0A%09pass%0A%0Adef start_task:%0A%09pass%0A%0Adef task_report:%0A%09pass%0A%0A%09%0Aif __name__ == '__main__':%0A # CALL Rally loader%0A # TODO %0A
13fde1fc881e2a89a7332790d497ac67b32cdb14
remove print line :/
ansible/roles/common/files/gather_data.py
ansible/roles/common/files/gather_data.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Gather data from logs. Gather data from the logs following certain criteria given by a json file and finally putting the data in org-mode tables in a output file. """ __author__ = "joe di castro <joe@joedicastro.com>" __license__ = "MIT" __date__ = "2017-04-16" __version__ = "0.1" import json import os import re from argparse import ArgumentParser def arguments(): """Defines the command line arguments for the script.""" main_desc = "Gather data from VPS' log files to compare them in tables." parser = ArgumentParser(description=main_desc) parser.add_argument("-p", "--path", default=".", help="the directory tree where the logs are." "Default: the current one. ") parser.add_argument("-j", "--json", default="./criteria.json", help="the json file with the criteria" "Default: ./criteria.json") parser.add_argument("-v", "--version", action="version", version="%(prog)s {0}".format(__version__), help="show program's version number and exit") return parser def gather_datum(rules, log_text): """Gather datum from log text following a set of given rules. :rules: dictionary that contains a set of rules to gather the datum "log_text: string containing the text of a log file """ regex = re.compile(rules['regex']) if not rules['average']: try: datum = re.findall(regex, log_text)[rules['idx']] except IndexError: datum = '' else: values = [ (float(i) / pow(10, rules['exp'])) for i in re.findall(regex, log_text) ] try: str_format = '{0:.3f}' if rules['round'] else '{0:.0f}' datum = str_format.format(sum(values) / len(values)) except ZeroDivisionError: datum = '' return datum def draw_tables(rules, gathering): """Draw org-mode tables from gathered data following a criteria. :rules: dictionary with the rules to draw each table :gathering: dictionary with all the data gathered """ servers, output, separator = sorted(gathering.keys()), [], '|-' for table in rules.keys(): # title output.append(table.upper().replace('_', ' ')) output.append('=' * 79 + os.linesep) # header output.append(separator) output.append(' | '.join(i.title() for i in ['|'] + servers)) output.append(separator) # rows for row in rules[table].keys(): row_header = '| {0} |'.format(rules[table][row]) row_cells = ' | '.join( gathering[server].get(row, '') for server in servers ) output.append('{0}{1}'.format(row_header, row_cells)) # footer output.append(separator) output.append(os.linesep) return os.linesep.join(output) def main(): args = arguments().parse_args() criteria = json.load(open(args.json)) criteria_per_log = criteria['criteria_per_log'] criteria_per_table = criteria['criteria_per_table'] servers = [i.name for i in os.scandir(args.path) if i.is_dir()] gathering = {server: dict() for server in servers} for root, dirs, files in os.walk(args.path): for fil in files: this_server = os.path.basename(os.path.abspath(root)) log_path = os.path.abspath(os.path.join(root, fil)) log = os.path.basename(log_path) with open(log_path) as log_file: log_contents = log_file.read() for datum in criteria_per_log.get(log, []): gathering[this_server][datum] = gather_datum( criteria_per_log[log][datum], log_contents ) tables = draw_tables(criteria_per_table, gathering) with open(os.path.join(args.path, 'tables.org'), 'w') as tables_file: tables_file.write(tables) print(tables) if __name__ == '__main__': main()
Python
0.000017
@@ -4092,27 +4092,8 @@ s)%0A%0A - print(tables)%0A%0A %0Aif
07a500eaa758e2bc51d2860d5b0d3f8108f9e1ee
Include password for redis
chat.py
chat.py
import asyncio import json import os import random import string import time from urllib.parse import urlparse from aiohttp import web from asyncio_redis import Connection, ZScoreBoundary import bleach BASE_DIR = os.path.dirname(__file__) RATE_LIMIT_DURATION = 60 RATE_LIMIT = 10 def make_key(*args): return':'.join(args) # For bleach def linkify_external(attrs, new=False): attrs['target'] = '_blank' return attrs def strip_tags(value): return bleach.clean(value, tags=[], strip=True) async def post_message(request, message, mode, queue=None, **data): if queue is None: queue = make_key(request.match_info['channel'], 'channel') nick = await get_nick(request) data.setdefault('message', message) data.setdefault('sender', nick) content = json.dumps(data) await request['conn'].publish(queue, json.dumps([mode, content])) # Nick handling async def get_nicks(request): key = make_key(request.match_info['channel'], 'nick', '*') keys = await request['conn'].keys_aslist(key) if keys: vals = await request['conn'].mget_aslist(keys) return {k: v for k, v in zip(vals, keys)} return {} async def get_nick(request): key = make_key(request.match_info['channel'], 'nick', request.tag) nick = await request['conn'].get(key) if nick is None: nick = await set_nick(request, request.tag[:8]) else: await request['conn'].expire(key, 90) return nick async def set_nick(request, name): name = strip_tags(name) nicks = await get_nicks(request) if name in nicks: raise ValueError('Nick in use!') key = make_key(request.match_info['channel'], 'nick', request.tag) await request['conn'].set(key, name, expire=90) return name # Topic handling async def set_topic(request, topic): key = make_key(request.match_info['channel'], 'topic') await request['conn'].set(key, topic) async def get_topic(request): key = make_key(request.match_info['channel'], 'topic') return await request['conn'].get(key) # Request handlers async def index(request): return web.Response(body=open(os.path.join(BASE_DIR, 'index.html'), 'rb').read(), content_type='text/html') async def listen(request): if 'text/event-stream' not in request.headers['ACCEPT']: return web.http.HTTPNotAcceptable() nick = await get_nick(request) await post_message(request, '{} connected.'.format(nick), 'join', sender='Notice') resp = web.StreamResponse() resp.headers[web.hdrs.CONTENT_TYPE] = 'text/event-stream; charset=utf-8' resp.headers['Cache-Control'] = 'no-cache' await resp.prepare(request) subscriber = await request['conn'].start_subscribe() await subscriber.subscribe([ make_key(request.match_info['channel'], 'channel'), make_key(request.tag, 'private'), ]) while True: msg = await subscriber.next_published() mode, data = json.loads(msg.value) resp.write('event: {}\n'.format(mode).encode('utf-8')) for line in data.splitlines(): resp.write('data: {}\n'.format(line).encode('utf-8')) resp.write('\n'.encode('utf-8')) return resp async def chatter(request): POST = await request.post() mode = POST.get('mode', 'message') msg = POST.get('message', '') msg = bleach.linkify(strip_tags(msg), callbacks=[linkify_external]) nick = await get_nick(request) if mode == 'nick' and msg: try: new_nick = await set_nick(request, msg) except ValueError: await post_message(request, 'Nick in use!', 'alert', sender='Notice') else: await post_message(request, '{} is now known as {}'.format(nick, new_nick), mode='nick', sender='Notice') elif mode == 'names': nicks = await get_nicks(request) await post_message(request, list(nicks.keys()), 'names') elif mode == 'msg': target = POST['target'] nicks = await get_nicks(request) _, _, target_tag = nicks[target].split(':') await post_message(request, msg, 'msg', target=target, queue=make_key(target_tag, 'private')) await post_message(request, msg, 'msg', target=target, queue=make_key(request.tag, 'private')) elif mode in ['message', 'action']: await post_message(request, msg, mode) elif mode == 'topic': if msg: await set_topic(request, msg) topic = await get_topic(request) await post_message(request, topic, 'topic') return web.Response(body=b'') async def cookie_middleware(app, handler): async def middleware(request): tag = request.cookies.get('chatterbox', None) request.tag = tag or ''.join(random.choice(string.ascii_letters) for x in range(16)) url = urlparse(os.environ.get('REDIS_URL', 'redis://localhost:6379')) request['conn'] = await Connection.create(host=url.hostname, port=url.port) # Rate limit key = make_key(request.tag, 'rated') now = time.time() await request['conn'].zadd(key, {str(int(now)): now}) await request['conn'].expireat(key, int(now) + RATE_LIMIT_DURATION) await request['conn'].zremrangebyscore(key, ZScoreBoundary('-inf'), ZScoreBoundary(now - RATE_LIMIT_DURATION)) size = await request['conn'].zcard(key) if size > RATE_LIMIT: response = web.Response(body=b'', status=429) else: # Call handler response = await handler(request) # Set cookie if tag is None: response.set_cookie('chatterbox', request.tag) return response return middleware if __name__ == '__main__': app = web.Application(middlewares=[cookie_middleware]) app.router.add_get('/', index) app.router.add_static('/static/', os.path.join(BASE_DIR, 'static')) app.router.add_get('/{channel}/', listen) app.router.add_post('/{channel}/', chatter) loop = asyncio.get_event_loop() web.run_app(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
Python
0
@@ -4960,16 +4960,39 @@ url.port +, password=url.password )%0A%0A
27330e69226f36b49f5d5eca5a67af29ee8d679b
Normalize the weasyl link.
conbadge.py
conbadge.py
from fractions import Fraction from cStringIO import StringIO from PIL import Image, ImageDraw, ImageFont import qrcode import requests museo = ImageFont.truetype('Museo500-Regular.otf', 424) badge_back = Image.open('badge-back.png') logo_stamp = Image.open('logo-stamp.png') qr_size = 975, 975 qr_offset = 75, 75 name_color = 6, 155, 192 text_bounds = 735, 125 text_offset = 365, 1155 avatar_bounds = 282, 282 avatar_offset = 49, 1131 class AvatarFetchError(Exception): pass def draw_text(text, color, fit_size): text_size = museo.getsize(text) img = Image.new('RGBA', text_size, color + (0,)) draw = ImageDraw.Draw(img) draw.text((0, 0), text, color + (255,), font=museo) width, height = img.size fit_width, fit_height = fit_size width_ratio = Fraction(width, fit_width) height_ratio = Fraction(height, fit_height) if width_ratio > height_ratio: new_size = fit_width, int(height / width_ratio) else: new_size = int(width / height_ratio), fit_height return img.resize(new_size, Image.ANTIALIAS) def center(size, fit_size, offset): w, h = size fw, fh = fit_size x, y = offset return x + (fw - w) // 2, y + (fh - h) // 2 logo_pos = center(logo_stamp.size, qr_size, qr_offset) def weasyl_badge(username, avatar_resizing=Image.ANTIALIAS): r = requests.get( 'https://www.weasyl.com/api/useravatar', params={'username': username}) resp = r.json() if resp['error']['code'] != 0: raise AvatarFetchError(resp['error']) back = badge_back.copy() qr = qrcode.QRCode( error_correction=qrcode.constants.ERROR_CORRECT_H, border=1) qr.add_data('https://weasyl.com/~%s' % (username,)) qr_mask = qr.make_image().resize(qr_size) back.paste((255, 255, 255, 255), qr_offset, qr_mask) back.paste(logo_stamp, logo_pos, logo_stamp) text = draw_text(username, name_color, text_bounds) text_pos = center(text.size, text_bounds, text_offset) back.paste(text, text_pos, text) avatar = Image.open(StringIO(requests.get(resp['avatar']).content)) avatar = avatar.resize(avatar_bounds, avatar_resizing).convert('RGBA') back.paste(avatar, avatar_offset, avatar) return back
Python
0.000002
@@ -1261,16 +1261,104 @@ ffset)%0A%0A +def weasyl_sysname(target):%0A return ''.join(i for i in target if i.isalnum()).lower()%0A%0A def weas @@ -1779,16 +1779,31 @@ /~%25s' %25 +(weasyl_sysname (usernam @@ -1803,16 +1803,17 @@ username +) ,))%0A
12ff7545dc08484414edd28bb70bababd5e89c59
bump gitlab protocol version to 4
runbot_gitlab/models/runbot_repo.py
runbot_gitlab/models/runbot_repo.py
# Copyright <2017> <Vauxoo info@vauxoo.com> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). import logging import re import urllib.parse import requests from odoo import fields, models _logger = logging.getLogger(__name__) def _get_url(url, base): """When get is URL_GITHUB/api/v3/User/keys must be convert to URL_GITLAB/User.keys Because the api of gitlab required admin token for get the ssh keys https://docs.gitlab.com/ee/api/users.html#list-ssh-keys""" match_object = re.search('([^/]+)/([^/]+)/([^/.]+(.git)?)', base) if match_object: prefix = ('https://%s/api/v3%s' if not url.endswith('/keys') else 'https://%s%s') project_name = (match_object.group(2) + '/' + match_object.group(3)) url = url.replace(':owner', match_object.group(2)) url = url.replace(':repo', match_object.group(3)) url = prefix % (match_object.group(1), url) url = url.replace('/repos/', '/projects/') url = url.replace('/commits/', '/repository/commits/') url = url.replace(project_name, urllib.parse.quote(project_name, safe='')) if url.endswith('/keys'): url = url.replace('users/', '').replace('/keys', '') url = url + '.keys' if '/pulls/' in url: urls = url.split('/pulls/') url = urls[0] + '/merge_requests?iid=' + urls[1] return url def _get_session(token): session = requests.Session() session.auth = (token, 'x-oauth-basic') session.headers.update({'PRIVATE-TOKEN': token}) return session class RunbotRepo(models.Model): _inherit = "runbot.repo" uses_gitlab = fields.Boolean(help='Enable the ability to use gitlab ' 'instead of github') def _git(self, cmd): """Rewriting the parent method to get merge_request from gitlab""" repos_gitlab = self.filtered('uses_gitlab') for repo_gitlab in repos_gitlab: cmd_gitlab = cmd.copy() if cmd_gitlab == ['fetch', '-p', 'origin', '+refs/pull/*/head:refs/pull/*']: cmd_gitlab.pop() cmd_gitlab.append('+refs/merge-requests/*/head:refs/pull/*') return super(RunbotRepo, repos_gitlab)._git(cmd_gitlab) return super(RunbotRepo, self - repos_gitlab)._git(cmd) def _github(self, url, payload=None, ignore_errors=False): """This method is the same as the one in the odoo-extra/runbot.py file but with the translation of each request github to gitlab format - Get information from merge requests input: URL_GITLAB/projects/... instead of URL_GITHUB/repos/... output: res['base']['ref'] = res['gitlab_base_mr'] res['head']['ref'] = res['gitlab_head_mr'] - Get user public keys input: URL_GITLAB/User.keys... instead of URL_GITHUB/users/User/keys... output: res['author'] = {'login': data['username']} res['commiter'] = {'login': data['username']} - Report statutes input: URL_GITLABL/... instead of URL_GITHUB/statuses/... output: N/A """ records_gitlab = self.filtered('uses_gitlab') for repo in records_gitlab.filtered('token'): try: url = _get_url(url, repo.base) if not url: return is_url_keys = url.endswith('.keys') session = _get_session(repo.token) if payload: response = session.post(url, data=payload) else: response = session.get(url) response.raise_for_status() json = (response.json() if not is_url_keys else response._content) if 'merge_requests?iid=' in url: json = json[0] json['head'] = {'ref': json['target_branch']} json['base'] = {'ref': json['source_branch']} if '/commits/' in url: for own_key in ['author', 'committer']: key_email = '%s_email' % own_key if json[key_email]: url = _get_url('/users?search=%s' % json[key_email], repo.base) response = session.get(url) response.raise_for_status() data = response.json() json[own_key] = { 'login': len(data) and data[0]['username'] or {} } if is_url_keys: json = [{'key': ssh_rsa} for ssh_rsa in json.split('\n')] return json except Exception: if ignore_errors: _logger.exception('Ignored gitlab error %s %r', url, payload) else: raise return super(RunbotRepo, self - records_gitlab)._github( url, payload=payload, ignore_errors=ignore_errors)
Python
0
@@ -298,17 +298,17 @@ UB/api/v -3 +4 /User/ke @@ -615,17 +615,17 @@ %25s/api/v -3 +4 %25s'%0A
90678692ec85ec90d454b1a3b255dae834bb24ba
trim space
tests/mocks/postgres.py
tests/mocks/postgres.py
from psycopg2.extensions import connection, cursor class MockConnection(connection): def __init__(self, *args, **kwargs): self._cursor = MockCursor() def cursor(self, *args, **kwargs): return self._cursor class MockCursor(cursor): def __init__(self, *args, **kwargs): self.queries = [] def execute(self, query, *args, **kwargs): self.queries.append(query) def fetchall(self, *args, **kwargs): return [] def fetchone(self, *args, **kwargs): return None __all__ = ["MockConnection"]
Python
0.000001
@@ -1,9 +1,8 @@ -%0A from psy
3160ab66db8bb0321ff2fd4a77de84526e8beefa
set boost constraint (#27806)
var/spack/repos/builtin/packages/3dtk/package.py
var/spack/repos/builtin/packages/3dtk/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class _3dtk(CMakePackage): """The 3D Toolkit provides algorithms and methods to process 3D point clouds. It includes automatic high-accurate registration (6D simultaneous localization and mapping, 6D SLAM) and other tools, e.g., a fast 3D viewer, plane extraction software, etc. Several file formats for the point clouds are natively supported, new formats can be implemented easily.""" homepage = "http://slam6d.sourceforge.net/" # Repo seems to be in the process of switching to git: # https://github.com/3DTK/3DTK version('trunk', svn='https://svn.code.sf.net/p/slam6d/code/trunk', preferred=True) version('1.2', svn='https://svn.code.sf.net/p/slam6d/code/branches/3dtk-release-1.2') variant('cgal', default=False, description='Compile with CGAL support') variant('opengl', default=True, description='Compile with OpenGL support') variant('opencv', default=True, description='Compile with OpenCV support') variant('compact_octree', default=False, description='Whether to use the compact octree display') variant('cuda', default=False, description='Whether to build CUDA accelerated collision detection tools') variant('openmp', default=False, description='Whether to use parallel processing capabilities of OPENMP') conflicts('~opencv', when='platform=darwin') conflicts('+compact_octree', when='~opengl') generator = 'Ninja' depends_on('cmake@3.5:', when='@trunk', type='build') depends_on('cmake@2.6.1:2', when='@1.2', type='build') depends_on('ninja', type='build') depends_on('boost+serialization+graph+regex+filesystem+system+thread+date_time+program_options') depends_on('suite-sparse') depends_on('zlib') depends_on('libpng') depends_on('eigen') depends_on('cgal', when='+cgal') depends_on('gl', when='+opengl') depends_on('glew', when='+opengl') depends_on('freeglut', when='+opengl') depends_on('opencv+calib3d+contrib+core+features2d+highgui+imgcodecs+imgproc+ml+videoio', when='+opencv') # Because concretizer is broken depends_on('opencv+flann', when='+opencv') depends_on('cuda', when='+cuda') # TODO: add Spack packages for these instead of using vendored copies # depends_on('ann') # depends_on('newmat') patch('homebrew.patch', when='platform=darwin') def setup_build_environment(self, env): env.prepend_path('CPATH', self.spec['eigen'].prefix.include) def cmake_args(self): return [ self.define_from_variant('WITH_CGAL', 'cgal'), self.define('WITH_GMP', False), self.define('WITH_LIBZIP', False), self.define_from_variant('WITH_OPENGL', 'opengl'), self.define_from_variant('WITH_OPENCV', 'opencv'), self.define('WITH_QT', False), self.define('WITH_GLFW', False), self.define('WITH_FTGL', False), self.define('WITH_XMLRPC', False), self.define('WITH_LIBCONFIG', False), self.define('WITH_ROS', False), self.define('WITH_PYTHON', False), self.define('WITH_WXWIDGETS', False), self.define_from_variant('WITH_COMPACT_OCTREE', 'compact_octree'), self.define('WITH_GLEE', False), self.define('WITH_LASLIB', False), self.define('WITH_E57', False), self.define('WITH_3DMOUSE', False), self.define_from_variant('WITH_CUDA', 'cuda'), self.define('WITH_RIVLIB', False), self.define('WITH_MICROEPSILONLIB', False), self.define_from_variant('WITH_OPENMP', 'openmp'), self.define('WITH_METRICS', False), self.define('WITH_ADDONS', False), ] def install(self, spec, prefix): install_tree('bin', prefix.bin)
Python
0
@@ -1805,16 +1805,22 @@ n('boost +@:1.75 +seriali
4c5ebbabcf54b1f23459da7ddf85adf5e5de22d8
Update add-lore.py to serve legacy lorebot needs
add-lore.py
add-lore.py
#!/usr/bin/env python3 import argparse import datetime from peewee import peewee db = peewee.SqliteDatabase(None) class BaseModel(peewee.Model): class Meta: database = db class Lore(BaseModel): time = peewee.DateTimeField(null=True, index=True) author = peewee.CharField(null=True, index=True) lore = peewee.CharField() rating = peewee.FloatField() def main(): lore_file = 'lore.db' db.init(lore_file) parser = argparse.ArgumentParser() parser.add_argument('author') parser.add_argument('lore', nargs='+', help="blob of lore to save") args = parser.parse_args() print(args.lore) t = datetime.datetime.now() # Try to parse plain loreblob, extracting author from [] author = args.author lore = ' '.join(args.lore) print(author, lore) db.begin() # Check to see if lore already exists (based on author/lore match) matches = Lore.select().where(Lore.author == author and Lore.lore == lore).count() if matches == 0: Lore.create(time=t, author=author, lore=lore, rating=0) db.commit() if __name__ == "__main__": main()
Python
0
@@ -377,16 +377,109 @@ ield()%0A%0A + def __str__(self):%0A return %22%5B%25s%5D %5B%25s%5D%5Cn%25s%22 %25 (self.time, self.author, self.lore)%0A%0A %0Adef mai @@ -576,42 +576,8 @@ r()%0A - parser.add_argument('author')%0A @@ -679,21 +679,31 @@ s()%0A -pr +lore = ' '.jo in -t (args.lo @@ -703,24 +703,87 @@ args.lore)%0A%0A + # add-lore %22%5Bmike_bloomfield%5D: how do i use the lorebot %22%0A%0A t = date @@ -881,19 +881,55 @@ r = -args.author +lore.split(': ')%5B0%5D.split('%5B')%5B1%5D.split('%5D')%5B0%5D %0A @@ -929,36 +929,40 @@ %5B0%5D%0A lore +txt = ' +: '.join( args.lore)%0A @@ -953,41 +953,28 @@ oin( -args.lore)%0A print(author, lore +lore.split(': ')%5B1:%5D )%0A%0A @@ -1173,16 +1173,20 @@ %0A + l = Lore.cr @@ -1222,16 +1222,19 @@ ore=lore +txt , rating @@ -1237,16 +1237,83 @@ ting=0)%0A + print(l)%0A else:%0A print(%22Lore already exists...%22)%0A db.c
5f21305d1736322064aa9f5e503965b102a6c086
Add Ending class
hairball/plugins/neu.py
hairball/plugins/neu.py
"""This module provides plugins for NEU metrics.""" import kurt from hairball.plugins import HairballPlugin from PIL import Image import os class Variables(HairballPlugin): """Plugin that counts the number of variables in a project.""" def __init__(self): super(Variables, self).__init__() self.total = 0 def finalize(self): """Output the number of variables in the project.""" print("Number of variables: %i" % self.total) def analyze(self, scratch): """Run and return the results of the Variables plugin.""" self.total = len(scratch.variables) for x in scratch.sprites: self.total += len(x.variables) class Lists(HairballPlugin): """Plugin that counts the number of lists in a project.""" def __init__(self): super(Lists, self).__init__() self.total = 0 def finalize(self): """Output the number of lists in the project.""" print("Number of lists: %i" % self.total) def analyze(self, scratch): """Run and return the results of the Lists plugin.""" self.total = len(scratch.lists) for x in scratch.sprites: self.total += len(x.lists) class BlockCounts(HairballPlugin): """Plugin that keeps track of the number of blocks in a project.""" def __init__(self): super(BlockCounts, self).__init__() self.blocks = 0 def finalize(self): """Output the aggregate block count results.""" print("Number of blocks %i" % self.blocks) def analyze(self, scratch): """Run and return the results from the BlockCounts plugin.""" for script in self.iter_scripts(scratch): for b in self.iter_blocks(script.blocks): self.blocks += 1 class Colors(HairballPlugin): """Plugin that keeps track of the colors of the stage images.""" def __init__(self): self.colors ={} def finalize(self): """Output the aggregate block count results.""" print self.colors def compute_average_image_color(self, img): """ Compute the most frequent color in img. Code adapted from http://blog.zeevgilovitz.com/detecting-dominant-colours-in-python/ """ image = Image.open(img) w, h = image.size pixels = image.getcolors(w * h) most_frequent_pixel = pixels[0] for count, colour in pixels: if count > most_frequent_pixel[0]: most_frequent_pixel = (count, colour) rgb = [] for i in range(3): rgb.append (most_frequent_pixel[1][i]) trgb = tuple(rgb) trgb = '#%02x%02x%02x' % trgb #Transform rgb to Hex color (HTML) return trgb def analyze(self, scratch): """Run and return the results from the BlockCounts plugin.""" #ToDo: get the images from stage and characters
Python
0.000011
@@ -624,33 +624,38 @@ es)%0A for -x +sprite in scratch.spri @@ -689,17 +689,22 @@ += len( -x +sprite .variabl @@ -1166,17 +1166,22 @@ for -x +sprite in scra @@ -1223,17 +1223,22 @@ += len( -x +sprite .lists)%0A @@ -1741,16 +1741,20 @@ for b +lock in self @@ -2892,32 +2892,32 @@ unts plugin.%22%22%22%0A - #ToDo: g @@ -2955,8 +2955,714 @@ aracters +%0A%0Aclass Ending(HairballPlugin):%0A%0A %22%22%22Plugin that checks if the project seems to end.%22%22%22%0A%0A def __init__(self):%0A super(Ending, self).__init__()%0A self.total = 0%0A%0A def finalize(self):%0A %22%22%22Output whether the project seems to end or not.%22%22%22%0A if self.total %3E 0:%0A print %22The game seems to end at some point%22%0A else:%0A print %22The game seems to not ever end%22%0A%0A def analyze(self, scratch):%0A %22%22%22Run and return the results of the Ending plugin.%22%22%22 %0A for script in self.iter_scripts(scratch):%0A for name, _, _ in self.iter_blocks(script.blocks):%0A if name == %22stop %25s%22:%0A self.total += 1%0A
6ed282bb2da04790e6e399faad4d2ba8dfc214c4
add v0.20210330 (#28111)
var/spack/repos/builtin/packages/ccls/package.py
var/spack/repos/builtin/packages/ccls/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Ccls(CMakePackage): """C/C++ language server""" homepage = "https://github.com/MaskRay/ccls" git = "https://github.com/MaskRay/ccls.git" url = "https://github.com/MaskRay/ccls/archive/0.20201025.tar.gz" maintainers = ['jacobmerson'] version('0.20201025', sha256='1470797b2c1a466e2d8a069efd807aac6fefdef8a556e1edf2d44f370c949221') variant('build_type', default='Release', description='CMake build type', values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel')) depends_on("cmake@3.8:", type="build") depends_on('llvm@7:') depends_on('rapidjson')
Python
0
@@ -485,16 +485,117 @@ rson'%5D%0A%0A + version('0.20210330', sha256='28c228f49dfc0f23cb5d581b7de35792648f32c39f4ca35f68ff8c9cb5ce56c2')%0A vers
f230e69780823f4ceb48a68015cd5bd4af94cba0
Add in some settings for email
ml_service_api/aws.py
ml_service_api/aws.py
""" Deployment settings file """ from settings import * import json DEBUG=False TIME_BETWEEN_INDEX_REBUILDS = 60 * 30 # seconds #Tastypie throttle settings THROTTLE_AT = 100 #Throttle requests after this number in below timeframe THROTTLE_TIMEFRAME= 60 * 60 #Timeframe in which to throttle N requests, seconds THROTTLE_EXPIRATION= 24 * 60 * 60 # When to remove throttle entries from cache, seconds with open(os.path.join(ENV_ROOT,"env.json")) as env_file: ENV_TOKENS = json.load(env_file) with open(os.path.join(ENV_ROOT, "auth.json")) as auth_file: AUTH_TOKENS = json.load(auth_file) DATABASES = AUTH_TOKENS.get('DATABASES', DATABASES) CACHES = AUTH_TOKENS.get('CACHES', CACHES) AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY) USE_S3_TO_STORE_MODELS = ENV_TOKENS.get('USE_S3_TO_STORE_MODELS', USE_S3_TO_STORE_MODELS) S3_BUCKETNAME = ENV_TOKENS.get('S3_BUCKETNAME', S3_BUCKETNAME) BROKER_URL = AUTH_TOKENS.get('BROKER_URL', BROKER_URL) CELERY_RESULT_BACKEND = AUTH_TOKENS.get('CELERY_RESULT_BACKEND', CELERY_RESULT_BACKEND) ELB_HOSTNAME = ENV_TOKENS.get('ELB_HOSTNAME', None) EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND) if ELB_HOSTNAME is not None: ALLOWED_HOSTS += [ELB_HOSTNAME]
Python
0.000001
@@ -1270,16 +1270,203 @@ CKEND)%0A%0A +AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', None)%0Aif AWS_SES_REGION_NAME is not None:%0A AWS_SES_REGION_ENDPOINT = 'email.%7B0%7D.amazonaws.com'.format(AWS_SES_REGION_NAME)%0A%0A if ELB_H
e4fff83666ee6e3ce63145f84a550f6fb361096d
Fix Enum hack situation
nycodex/db.py
nycodex/db.py
from enum import Enum import os import typing import sqlalchemy from sqlalchemy.dialects import postgresql from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() # type: typing.Any engine = sqlalchemy.create_engine(os.environ["DATABASE_URI"]) Session = sqlalchemy.orm.sessionmaker(bind=engine) class DomainCategory(Enum): BIGAPPS = "NYC BigApps" BUSINESS = "Business" CITY_GOVERNMENT = "City Government" EDUCATION = "Education" ENVIRONMENT = "Environment" HEALTH = "Health" HOUSING_DEVELOPMENT = "Housing & Development" PUBLIC_SAFETY = "Public Safety" RECREATION = "Recreation" SOCIAL_SERVICES = "Social Services" TRANSPORTATION = "Transportation" class AssetType(Enum): CALENDAR = 'calendar' CHART = 'chart' DATALENS = 'datalens' DATASET = 'dataset' FILE = 'file' FILTER = 'filter' HREF = 'href' MAP = 'map' class DbMixin(): __table__: sqlalchemy.Table @classmethod def upsert(cls, conn: sqlalchemy.engine.base.Connection, instances: typing.Iterable["DbMixin"]) -> None: keys = cls.__table__.c.keys() for instance in instances: data = {key: getattr(instance, key) for key in keys} insert = (postgresql.insert(cls.__table__).values(**data) .on_conflict_do_update( index_elements=[cls.__table__.c.id], set_={k: data[k] for k in data if k != 'id'})) conn.execute(insert) def __eq__(self, other): keys = self.__table__.c.keys() return ({key: getattr(self, key) for key in keys} == { key: getattr(other, key) for key in keys }) class Dataset(Base, DbMixin): __tablename__ = "dataset" id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True) name = sqlalchemy.Column(sqlalchemy.VARCHAR, nullable=False) description = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False) is_official = sqlalchemy.Column(sqlalchemy.BOOLEAN, nullable=False) owner_id = sqlalchemy.Column( sqlalchemy.CHAR(9), sqlalchemy.ForeignKey("owner.id")) updated_at = sqlalchemy.Column( sqlalchemy.TIMESTAMP(timezone=True), nullable=False) scraped_at = sqlalchemy.Column( sqlalchemy.TIMESTAMP(timezone=True), nullable=True) domain_category = sqlalchemy.Column( postgresql.ENUM( * [v.value for v in DomainCategory.__members__.values()], name="DomainCategory"), nullable=True) asset_type = sqlalchemy.Column( postgresql.ENUM( * [v.value for v in AssetType.__members__.values()], name="AssetType"), nullable=True) class Owner(Base, DbMixin): __tablename__ = "owner" id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True) name = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
Python
0.000013
@@ -1,22 +1,12 @@ -from enum import -E +e num%0A @@ -306,24 +306,37 @@ d=engine)%0A%0A%0A +@enum.unique%0A class Domain @@ -344,16 +344,21 @@ ategory( +enum. Enum):%0A @@ -728,16 +728,29 @@ tion%22%0A%0A%0A +@enum.unique%0A class As @@ -757,16 +757,21 @@ setType( +enum. Enum):%0A @@ -954,18 +954,16 @@ DbMixin -() :%0A __ @@ -1694,33 +1694,16 @@ lf, key) -%0A for key @@ -1715,13 +1715,8 @@ eys%7D - == %7B %0A @@ -1728,21 +1728,20 @@ - +== %7B key: get @@ -1760,65 +1760,221 @@ key) -%0A + for key in keys%7D) # yapf: disable%0A%0A%0Adef sql_enum(enum: typing.Type%5Benum.Enum%5D):%0A return type(enum.__name__, (), %7B%0A %22__members__%22: %7Bv.value: v for -key +v in -keys%0A %7D) +enum.__members__.values()%7D%0A %7D) # yapf: disable %0A%0A%0Ac @@ -2667,90 +2667,33 @@ NUM( -%0A * %5Bv.value for v in DomainCategory.__members__.values()%5D,%0A +sql_enum(DomainCategory), nam @@ -2800,85 +2800,28 @@ NUM( -%0A * %5Bv.value for v in AssetType.__members__.values()%5D,%0A +sql_enum(AssetType), nam @@ -2835,24 +2835,16 @@ tType%22), -%0A nullabl
b84eb5f015a53a84975955a2b239e577d539f338
Fix for q-dtype
cupyx/jit/_typerules.py
cupyx/jit/_typerules.py
import ast import numpy from cupy._logic import ops from cupy._math import arithmetic from cupy._logic import comparison from cupy._binary import elementwise from cupy import core from cupyx.jit import _types _numpy_scalar_true_divide = core.create_ufunc( 'numpy_scalar_true_divide', ('??->d', '?i->d', 'i?->d', 'bb->f', 'bi->d', 'BB->f', 'Bi->d', 'hh->f', 'HH->f', 'ii->d', 'II->d', 'll->d', 'LL->d', 'qq->d', 'QQ->d', 'ee->e', 'ff->f', 'dd->d', 'FF->F', 'DD->D'), 'out0 = (out0_type)in0 / (out0_type)in1', ) _numpy_scalar_invert = core.create_ufunc( 'numpy_scalar_invert', ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q', 'Q->Q'), 'out0 = ~in0', ) _numpy_scalar_logical_not = core.create_ufunc( 'numpy_scalar_logical_not', ('?->?', 'b->?', 'B->?', 'h->?', 'H->?', 'i->?', 'I->?', 'l->?', 'L->?', 'q->?', 'Q->?', 'e->?', 'f->?', 'd->?', ('F->?', 'out0 = !in0.real() && !in0.imag()'), ('D->?', 'out0 = !in0.real() && !in0.imag()')), 'out0 = !in0', ) _scalar_lt = core.create_comparison('scalar_less', '<') _scalar_lte = core.create_comparison('scalar_less', '<=') _scalar_gt = core.create_comparison('scalar_less', '>') _scalar_gte = core.create_comparison('scalar_less', '>=') _py_ops = { ast.And: lambda x, y: x and y, ast.Or: lambda x, y: x or y, ast.Add: lambda x, y: x + y, ast.Sub: lambda x, y: x - y, ast.Mult: lambda x, y: x * y, ast.Pow: lambda x, y: x ** y, ast.Div: lambda x, y: x / y, ast.FloorDiv: lambda x, y: x // y, ast.Mod: lambda x, y: x % y, ast.LShift: lambda x, y: x << y, ast.RShift: lambda x, y: x >> y, ast.BitOr: lambda x, y: x | y, ast.BitAnd: lambda x, y: x & y, ast.BitXor: lambda x, y: x ^ y, ast.Invert: lambda x: ~x, ast.Not: lambda x: not x, ast.Eq: lambda x, y: x == y, ast.NotEq: lambda x, y: x != y, ast.Lt: lambda x, y: x < y, ast.LtE: lambda x, y: x <= y, ast.Gt: lambda x, y: x > y, ast.GtE: lambda x, y: x >= y, ast.USub: lambda x: -x, } _numpy_ops = { ast.And: ops.logical_and, ast.Or: ops.logical_or, ast.Add: arithmetic.add, ast.Sub: arithmetic.subtract, ast.Mult: arithmetic.multiply, ast.Pow: arithmetic.power, ast.Div: _numpy_scalar_true_divide, ast.FloorDiv: arithmetic.floor_divide, ast.Mod: arithmetic.remainder, ast.LShift: elementwise.left_shift, ast.RShift: elementwise.right_shift, ast.BitOr: elementwise.bitwise_or, ast.BitAnd: elementwise.bitwise_and, ast.BitXor: elementwise.bitwise_xor, ast.Invert: _numpy_scalar_invert, ast.Not: _numpy_scalar_logical_not, ast.Eq: comparison.equal, ast.NotEq: comparison.not_equal, ast.Lt: _scalar_lt, ast.LtE: _scalar_lte, ast.Gt: _scalar_gt, ast.GtE: _scalar_gte, ast.USub: arithmetic.negative, } def get_pyfunc(op_type): return _py_ops[op_type] def get_ufunc(mode, op_type): if mode == 'numpy': return _numpy_ops[op_type] if mode == 'cuda': raise NotImplementedError assert False def get_ctype_from_scalar(mode, x): if isinstance(x, numpy.generic): return _types.Scalar(x.dtype) if mode == 'numpy': if isinstance(x, bool): return _types.Scalar(numpy.bool_) if isinstance(x, int): return _types.Scalar(numpy.int64) if isinstance(x, float): return _types.Scalar(numpy.float64) if isinstance(x, complex): return _types.Scalar(numpy.complex128) if mode == 'cuda': if isinstance(x, bool): return _types.Scalar(numpy.bool_) if isinstance(x, int): if -(1 << 31) <= x < (1 << 31): return _types.Scalar(numpy.int32) return _types.Scalar(numpy.int64) if isinstance(x, float): return _types.Scalar(numpy.float32) if isinstance(x, complex): return _types.Scalar(numpy.complex64) raise NotImplementedError(f'{x} is not scalar object.') _suffix_literals_dict = { numpy.dtype('float64'): '', numpy.dtype('float32'): 'f', numpy.dtype('int64'): 'll', numpy.dtype('int32'): '', numpy.dtype('uint64'): 'ull', numpy.dtype('uint32'): 'u', numpy.dtype('bool'): '', } def get_cuda_code_from_constant(x, ctype): dtype = ctype.dtype suffix_literal = _suffix_literals_dict.get(dtype) if suffix_literal is not None: s = str(x).lower() return f'{s}{suffix_literal}' ctype = str(ctype) if dtype.kind == 'c': return f'{ctype}({x.real}, {x.imag})' if ' ' in ctype: return f'({ctype}){x}' return f'{ctype}({x})'
Python
0.000002
@@ -4180,24 +4180,59 @@ 64'): 'll',%0A + numpy.dtype('longlong'): 'll',%0A numpy.dt @@ -4279,24 +4279,61 @@ 4'): 'ull',%0A + numpy.dtype('ulonglong'): 'ull',%0A numpy.dt
6ac28c1daa0173ae5baa66c9cb020e9c673973ff
Add info for lftp@4.8.1 (#5452)
var/spack/repos/builtin/packages/lftp/package.py
var/spack/repos/builtin/packages/lftp/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Lftp(AutotoolsPackage): """LFTP is a sophisticated file transfer program supporting a number of network protocols (ftp, http, sftp, fish, torrent).""" homepage = "http://lftp.yar.ru/" url = "http://lftp.yar.ru/ftp/lftp-4.7.7.tar.gz" version('4.7.7', 'ddc71b3b11a1af465e829075ae14b3ff') depends_on('expat') depends_on('libiconv') depends_on('ncurses') depends_on('openssl') depends_on('readline') depends_on('zlib') def configure_args(self): return [ '--with-expat={0}'.format(self.spec['expat'].prefix), '--with-libiconv={0}'.format(self.spec['libiconv'].prefix), '--with-openssl={0}'.format(self.spec['openssl'].prefix), '--with-readline={0}'.format(self.spec['readline'].prefix), '--with-zlib={0}'.format(self.spec['zlib'].prefix), '--disable-dependency-tracking', ]
Python
0
@@ -1514,16 +1514,73 @@ ar.gz%22%0A%0A + version('4.8.1', '419b27c016d968a0226b2e5df1454c22')%0A vers
e2be8a486c9d13f98d9f14ae7b0cddf8225cf1b3
Add boolswitch test
test/test_apv_rename.py
test/test_apv_rename.py
""" Copyright (c) 2017, Michael Sonntag (sonntag@bio.lmu.de) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted under the terms of the BSD License. See LICENSE file in the root of the project. """ import os import shutil import tempfile import unittest import uuid class RenameTest(unittest.TestCase): def setUp(self): dir_name = "bren_%s" % str(uuid.uuid1()) self.tmpdir = os.path.join(tempfile.gettempdir(), dir_name) if not os.path.isdir(self.tmpdir): os.makedirs(self.tmpdir) for i in range(0, 3): tmp_file = "tmpfile_%s.jpg" % i open(os.path.join(self.tmpdir, tmp_file), 'a').close() def tearDown(self): shutil.rmtree(self.tmpdir) def test_tmp_files(self): self.assertEqual(len(os.listdir(self.tmpdir)), 3)
Python
0.000001
@@ -327,16 +327,57 @@ t uuid%0A%0A +from bren.bulk_rename import BulkRename%0A%0A %0Aclass R @@ -914,8 +914,160 @@ r)), 3)%0A +%0A def test_bool_switch(self):%0A self.assertEqual(BulkRename._bool_switch(1), True)%0A self.assertEqual(BulkRename._bool_switch(2), False)%0A
59faede78ad5d763c7c9fa1763e3e7cac67c1ca6
Move Circle inside CxDeriv
cxroots/CxDerivative.py
cxroots/CxDerivative.py
from __future__ import division import numpy as np from numpy import inf, pi import scipy.integrate import math from cxroots.Contours import Circle, Rectangle def CxDeriv(f, contour=None): """ Compute derivaive of an analytic function using Cauchy's Integral Formula for Derivatives """ if contour is None: C = lambda z0: Circle(z0, 1e-3) else: C = lambda z0: contour def df(z0, n): integrand = lambda z: f(z)/(z-z0)**(n+1) return C(z0).integrate(integrand) * math.factorial(n)/(2j*pi) return np.vectorize(df)
Python
0
@@ -110,56 +110,8 @@ th%0A%0A -from cxroots.Contours import Circle, Rectangle%0A%0A def @@ -258,16 +258,54 @@ s None:%0A +%09%09from cxroots.Contours import Circle%0A %09%09C = la
2df7947f02fd39e05bf18a89f904e273d17c63ca
add v0.9.29 (#23606)
var/spack/repos/builtin/packages/lmdb/package.py
var/spack/repos/builtin/packages/lmdb/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Lmdb(MakefilePackage): """Symas LMDB is an extraordinarily fast, memory-efficient database we developed for the Symas OpenLDAP Project. With memory-mapped files, it has the read performance of a pure in-memory database while retaining the persistence of standard disk-based databases.""" homepage = "https://lmdb.tech/" url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz" version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26') version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28') version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559') version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b') build_directory = 'libraries/liblmdb' @property def install_targets(self): return ['prefix={0}'.format(self.prefix), 'install'] @run_after('install') def install_pkgconfig(self): mkdirp(self.prefix.lib.pkgconfig) with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f: f.write('prefix={0}\n'.format(self.prefix)) f.write('exec_prefix=${prefix}\n') f.write('libdir={0}\n'.format(self.prefix.lib)) f.write('includedir={0}\n'.format(self.prefix.include)) f.write('\n') f.write('Name: LMDB\n') f.write('Description: Symas LMDB is an extraordinarily fast, ' 'memory-efficient database.\n') f.write('Version: {0}\n'.format(self.spec.version)) f.write('Cflags: -I${includedir}\n') f.write('Libs: -L${libdir} -llmdb\n')
Python
0
@@ -634,16 +634,113 @@ ar.gz%22%0A%0A + version('0.9.29', sha256='22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb')%0A vers
418d9f07dcda2e0c141be0bca4595447fa09c280
Add 19.0.4 (#11415)
var/spack/repos/builtin/packages/mesa/package.py
var/spack/repos/builtin/packages/mesa/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * import sys class Mesa(MesonPackage): """Mesa is an open-source implementation of the OpenGL specification - a system for rendering interactive 3D graphics.""" homepage = "http://www.mesa3d.org" # Note that we always want to build from the git repo instead of a # tarball since the tarball has pre-generated files for certain versions # of LLVM while the git repo doesn't so it can adapt at build time to # whatever version of LLVM you're using. git = "https://gitlab.freedesktop.org/mesa/mesa.git" version('develop', branch='master') version('19.1.develop', branch='19.1') version('19.0.develop', branch='19.0') version('19.0.3', tag='mesa-19.0.3', preferred=True) version('19.0.2', tag='mesa-19.0.2') version('19.0.1', tag='mesa-19.0.1') version('19.0.0', tag='mesa-19.0.0') depends_on('meson@0.45:', type='build') depends_on('binutils', type='build') depends_on('bison', type='build') depends_on('flex', type='build') depends_on('gettext', type='build') depends_on('pkgconfig', type='build') depends_on('python@3:', type='build') depends_on('py-mako@0.8.0:', type='build') depends_on('libxml2') depends_on('zlib') depends_on('expat') # Internal options variant('llvm', default=True, description="Enable LLVM.") variant('swr', values=any_combination_of('avx', 'avx2', 'knl', 'skx'), description="Enable the SWR driver.") # conflicts('~llvm', when='~swr=none') # Front ends variant('osmesa', default=True, description="Enable the OSMesa frontend.") is_linux = sys.platform.startswith('linux') variant('glx', default=is_linux, description="Enable the GLX frontend.") # TODO: effectively deal with EGL. The implications of this have not been # worked through yet # variant('egl', default=False, description="Enable the EGL frontend.") # TODO: Effectively deal with hardware drivers # The implication of this is enabling DRI, among other things, and # needing to check which llvm targets were built (ptx or amdgpu, etc.) # Back ends variant('opengl', default=True, description="Enable full OpenGL support.") variant('opengles', default=False, description="Enable OpenGL ES support.") # Provides provides('gl@4.5', when='+opengl') provides('glx@1.4', when='+glx') # provides('egl@1.5', when='+egl') # Variant dependencies depends_on('llvm@6:', when='+llvm') depends_on('libx11', when='+glx') depends_on('libxcb', when='+glx') depends_on('libxext', when='+glx') depends_on('glproto@1.4.14:', when='+glx', type='build') # Fix glproto dependency for glx=gallium-xlib # https://gitlab.freedesktop.org/mesa/mesa/merge_requests/806 patch('glproto-mr806.patch', when='@19.0.0:19.0.999') def meson_args(self): spec = self.spec args = [ '-Dglvnd=false', '-Dgallium-nine=false', '-Dgallium-omx=disabled', '-Dgallium-opencl=disabled', '-Dgallium-va=false', '-Dgallium-vdpau=false', '-Dgallium-xa=false', '-Dgallium-xvmc=false', '-Dvulkan-drivers='] args_platforms = [] args_gallium_drivers = ['swrast'] args_dri_drivers = [] num_frontends = 0 if '+osmesa' in spec: num_frontends += 1 args.append('-Dosmesa=gallium') else: args.append('-Dosmesa=disabled') if '+glx' in spec: num_frontends += 1 args.append('-Dglx=gallium-xlib') args_platforms.append('x11') else: args.append('-Dglx=disabled') if '+egl' in spec: num_frontends += 1 args.extend(['-Degl=true', '-Dgbm=true']) else: args.extend(['-Degl=false', '-Dgbm=false']) if '+opengl' in spec: args.append('-Dopengl=true') else: args.append('-Dopengl=false') if '+opengles' in spec: args.extend(['-Dgles1=true', '-Dgles2=true']) else: args.extend(['-Dgles1=false', '-Dgles2=false']) if '+egl' in spec or '+osmesa' in spec: args_platforms.append('surfaceless') if num_frontends > 1: args.append('-Dshared-glapi=true') else: args.append('-Dshared-glapi=false') if '+llvm' in spec: args.append('-Dllvm=true') if '+link_dylib' in spec['llvm']: args.append('-Dshared-llvm=true') else: args.append('-Dshared-llvm=false') else: args.append('-Dllvm=false') args_swr_arches = [] if 'swr=avx' in spec: args_swr_arches.append('avx') if 'swr=avx2' in spec: args_swr_arches.append('avx2') if 'swr=knl' in spec: args_swr_arches.append('knl') if 'swr=skx' in spec: args_swr_arches.append('skx') if args_swr_arches: if '+llvm' not in spec: raise SpecError('Variant swr requires +llvm') args_gallium_drivers.append('swr') args.append('-Dswr-arches=' + ','.join(args_swr_arches)) # Add the remaining list args args.append('-Dplatforms=' + ','.join(args_platforms)) args.append('-Dgallium-drivers=' + ','.join(args_gallium_drivers)) args.append('-Ddri-drivers=' + ','.join(args_dri_drivers)) return args
Python
0.000002
@@ -902,17 +902,17 @@ n('19.0. -3 +4 ', tag=' @@ -921,17 +921,17 @@ sa-19.0. -3 +4 ', prefe @@ -941,16 +941,57 @@ d=True)%0A + version('19.0.3', tag='mesa-19.0.3')%0A vers @@ -3037,16 +3037,76 @@ sts/806%0A + # Was included in the upstream patch release for 19.0.4%0A patc @@ -3153,11 +3153,9 @@ 9.0. -999 +3 ')%0A%0A
7abc503d6aa492f2340ab0b98d1f66892180ba19
Fix some test error
tests/test_blueprint.py
tests/test_blueprint.py
from wood import Wood from wood.support import Blueprint def make_example_blueprint(): b = Blueprint() b.empty(r"/example","example") return b def test_blueprint_can_add_empty_handler(): b = make_example_blueprint() assert b != None def test_blueprint_can_add_handlers_to_wood(): w = Wood() b = make_example_blueprint() b.to(w) assert len(w.application.handlers) > 0 def test_blueprint_can_get_new_wood(): b = make_example_blueprint() w.get_wood() assert len(w.application.handlers) > 0
Python
0.002705
@@ -485,16 +485,20 @@ )%0A%0A w + = b .get_woo
d5438347980b4ed3f4a798b8c1019b87691f28bd
Bump version
oi/version.py
oi/version.py
VERSION = '0.2.0'
Python
0
@@ -8,11 +8,11 @@ = '0.2. -0 +1 '%0A
676440a464d695146361eb1bdb684e121bf41a42
fix simple_date parsing
solution/__init__.py
solution/__init__.py
# coding=utf-8 """ ============================= Solution ============================= An amazing form solution :copyright: `Juan-Pablo Scaletti <http://jpscaletti.com>`_. :license: MIT, see LICENSE for more details. """ from .form import Form # noqa from .formset import FormSet # noqa from .fields import * # noqa from .validators import * # noqa from .utils import Markup, get_html_attrs, to_unicode # noqa __version__ = '5.2.5'
Python
0.000345
@@ -457,11 +457,11 @@ = '5.2. -5 +6 '%0A
0a1a8a5bcea081b2b3f86f8d7f4ee39e261efad2
Add license to kzip writer
verilog/tools/kythe/verilog_kythe_kzip_writer.py
verilog/tools/kythe/verilog_kythe_kzip_writer.py
"""Produces Kythe KZip from the given SystemVerilog source files.""" import hashlib import os import sys import zipfile from absl import app from absl import flags from absl import logging from collections.abc import Sequence from third_party.proto.kythe import analysis_pb2 from verilog.tools.kythe import filelist_parser flags.DEFINE_string( "filelist_path", "", ("The path to the filelist which contains the names of System Verilog " "files. The files should be ordered by definition dependencies.")) flags.DEFINE_string( "filelist_root", "", ("The absolute location which we prepend to the files in the filelist " "(where listed files are relative to).")) flags.DEFINE_string("code_revision", "", "Version control revision at which this code was taken.") flags.DEFINE_string("corpus", "", "Corpus (e.g., the project) to which this code belongs.") flags.DEFINE_string("output_path", "", "Path where to write the kzip.") FLAGS = flags.FLAGS def PrintUsage(binary_name: str): print( """usage: {binary_name} [options] --filelist_path FILE --filelist_root FILE --output_path FILE Produces Kythe KZip from the given SystemVerilog source files. Input: A file which lists paths to the SystemVerilog top-level translation unit files (one per line; the path is relative to the location of the filelist). Output: Produces Kythe KZip (https://kythe.io/docs/kythe-kzip.html).""".format( binary_name=binary_name)) def Sha256(content: bytes) -> str: """Returns SHA256 of the content as HEX string.""" m = hashlib.sha256() m.update(content) return m.hexdigest() def main(argv: Sequence[str]) -> None: if not FLAGS.filelist_path: PrintUsage(argv[0]) raise app.UsageError("No --filelist_path was specified.") if not FLAGS.filelist_root: PrintUsage(argv[0]) raise app.UsageError("No --filelist_root was specified.") if not FLAGS.output_path: PrintUsage(argv[0]) raise app.UsageError("No --output_path was specified.") # open the filelist and parse it # collect the file paths relative to the root # indexed compilation unit compilation = analysis_pb2.IndexedCompilation() if FLAGS.code_revision: compilation.index.revisions.append(FLAGS.code_revision) unit = compilation.unit unit.v_name.corpus = FLAGS.corpus unit.v_name.root = FLAGS.filelist_root unit.v_name.language = "verilog" # The filelist path is arbitrary. We simplify it to always be "./filelist" unit.argument.append("--f=filelist") # Load the filelist with open(FLAGS.filelist_path, "rb") as f: filelist_content = f.read() filelist = filelist_parser.ParseFileList( filelist_content.decode(encoding=sys.getdefaultencoding())) # unit.required_input # input.info.path & v_name (path and root) # Zip files, populate required_input and zip the unit with zipfile.ZipFile(FLAGS.output_path, mode="w") as zf: # Add the filelist to the files digest = Sha256(filelist_content) zf.writestr(os.path.join("root", "files", digest), filelist_content) req_input = unit.required_input.add() req_input.info.path = "filelist" req_input.info.digest = digest # Add all files for file_path in filelist.files: # The filelist references the files relative to the filelist location. path_prefix = os.path.dirname(FLAGS.filelist_path) with open(os.path.join(path_prefix, file_path), "rb") as f: content = f.read() digest = Sha256(content) zf.writestr(os.path.join("root", "files", digest), content) req_input = unit.required_input.add() req_input.info.path = file_path req_input.info.digest = digest req_input.v_name.path = file_path req_input.v_name.root = FLAGS.filelist_root # Add the compilation unit serialized_unit = compilation.SerializeToString() zf.writestr( os.path.join("root", "pbunits", Sha256(serialized_unit)), serialized_unit) if __name__ == "__main__": app.run(main)
Python
0
@@ -1,8 +1,621 @@ +#!/usr/bin/env python3%0A# Copyright 2017-2020 The Verible Authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A %22%22%22Produ
b3526d2818b423b6fd6e49ca157781794812ec4b
Remove unused fixture
test/test_csv_writer.py
test/test_csv_writer.py
# encoding: utf-8 """ .. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com> """ from __future__ import absolute_import import collections import itertools import pytablewriter as ptw import pytablereader as ptr import pytest from .data import header_list from .data import value_matrix from .data import value_matrix_with_none from .data import mix_header_list from .data import mix_value_matrix from .data import value_matrix_iter Data = collections.namedtuple("Data", "col_delim header value expected") normal_test_data_list = [ Data( col_delim=",", header=header_list, value=value_matrix, expected=""""a","b","c","dd","e" 1,123.1,"a",1.0,"1" 2,2.2,"bb",2.2,"2.2" 3,3.3,"ccc",3.0,"cccc" """ ), Data( col_delim=",", header=header_list, value=[], expected=""""a","b","c","dd","e" """ ), Data( col_delim=",", header=[], value=value_matrix, expected="""1,123.1,"a",1.0,"1" 2,2.2,"bb",2.2,"2.2" 3,3.3,"ccc",3.0,"cccc" """ ), Data( col_delim="\t", header=None, value=value_matrix, expected="""1\t123.1\t"a"\t1.0\t"1" 2\t2.2\t"bb"\t2.2\t"2.2" 3\t3.3\t"ccc"\t3.0\t"cccc" """ ), Data( col_delim=",", header=header_list, value=value_matrix_with_none, expected=""""a","b","c","dd","e" 1,,"a",1.0, ,2.2,,2.2,"2.2" 3,3.3,"ccc",,"cccc" ,,,, """ ), Data( col_delim=",", header=mix_header_list, value=mix_value_matrix, expected=""""i","f","c","if","ifc","bool","inf","nan","mix_num","time" 1,1.10,"aa",1.0,"1",True,inf,nan,1.0,"2017-01-01 00:00:00" 2,2.20,"bbb",2.2,"2.2",False,inf,nan,inf,"2017-01-02 03:04:05+0900" 3,3.33,"cccc",-3.0,"ccc",True,inf,nan,nan,"2017-01-01 00:00:00" """ ), ] exception_test_data_list = [ Data( col_delim=",", header=header, value=value, expected=ptw.EmptyTableDataError ) for header, value in itertools.product([None, [], ""], [None, [], ""]) ] table_writer_class = ptw.CsvTableWriter class Test_CsvTableWriter_write_new_line: def test_normal(self, capsys): writer = table_writer_class() writer.write_null_line() out, _err = capsys.readouterr() assert out == "\n" class Test_CsvTableWriter_set_table_data: def test_normal(self, capsys): writer = table_writer_class() csv_text = """"a","b","c","dd","e" 1,,"a",1.0, ,2.2,,2.2,"2.2" 3,3.3,"ccc",,"cccc" """ loader = ptr.CsvTableTextLoader(csv_text) for tabledata in loader.load(): writer.set_table_data(tabledata) assert writer.table_name == "csv1" assert writer.header_list == ["a", "b", "c", "dd", "e"] assert writer.value_matrix == [ ['1', '', 'a', '1.0', ''], ['', '2.2', '', '2.2', '2.2'], ['3', '3.3', 'ccc', '', 'cccc'] ] class Test_CsvTableWriter_write_table: @pytest.mark.parametrize(["col_delim", "header", "value", "expected"], [ [data.col_delim, data.header, data.value, data.expected] for data in normal_test_data_list ]) def test_normal(self, capsys, col_delim, header, value, expected): writer = table_writer_class() writer.column_delimiter = col_delim writer.header_list = header writer.value_matrix = value writer.write_table() out, _err = capsys.readouterr() assert out == expected @pytest.mark.parametrize(["header", "value", "expected"], [ [data.header, data.value, data.expected] for data in exception_test_data_list ]) def test_exception(self, capsys, header, value, expected): writer = table_writer_class() writer.header_list = header writer.value_matrix = value with pytest.raises(expected): writer.write_table() class Test_CsvTableWriter_write_table_iter: @pytest.mark.parametrize(["table", "header", "value", "expected"], [ [ "tablename", ["ha", "hb", "hc"], value_matrix_iter, """"ha","hb","hc" 1,2,3 11,12,13 1,2,3 11,12,13 101,102,103 1001,1002,1003 """, ], ]) def test_normal(self, capsys, table, header, value, expected): writer = table_writer_class() writer.table_name = table writer.header_list = header writer.value_matrix = value writer.iteration_length = len(value) writer.write_table_iter() out, _err = capsys.readouterr() assert out == expected @pytest.mark.parametrize( ["header", "value", "expected"], [ [data.header, data.value, data.expected] for data in exception_test_data_list ] ) def test_exception(self, capsys, header, value, expected): writer = table_writer_class() writer.header_list = header writer.value_matrix = value with pytest.raises(expected): writer.write_table_iter()
Python
0.000001
@@ -2374,32 +2374,24 @@ _normal(self -, capsys ):%0A w
c9f634ccea4b034907bb403a683945ace373c97d
Add unary + - support for atoms
solver/core/atoms.py
solver/core/atoms.py
class Atom(object): """ Base class for any atomic type """ def __add__(self, other): from .operations import Add return Add(self, other) def __radd__(self, other): return self.__add__(other) def __mul__(self, other): from .operations import Mul return Mul(self, other) def __rmul__(self, other): return self.__mul__(other) def __pow__(self, power, modulo=None): from .operations import Pow return Pow(self, power) def __div__(self, other): from .operations import Div return Div(self, other) def __repr__(self): try: return '{}({})'.format(self.__class__.__name__, self.args) except AttributeError: return '{}'.format(self.__class__.__name__) class Undefined(Atom): pass class Number(Atom): def __new__(cls, *args): if len(args) == 1: if isinstance(args[0], Number): return args[0] if isinstance(args[0], int): return super(Number, cls).__new__(Integer) class Integer(Number): def __init__(self, value): self.value = value def __add__(self, other): if isinstance(other, Integer): return Integer(self.value + other.value) else: Atom.__mul__(other) def __iadd__(self, other): return self + other def __mul__(self, other): if isinstance(other, Integer): return Integer(self.value * other.value) else: Atom.__mul__(other) def __rmul__(self, other): return self.__mul__(other) def __pow__(self, power, modulo=None): if isinstance(power, Integer): if power.value == 0 and self.value == 0: return Undefined() return Integer(self.value ** power.value) else: Atom.__mul__(power) def __eq__(self, other): if isinstance(other, Integer): return self.value == other.value else: return False def __ne__(self, other): return not self.__eq__(other)
Python
0.001074
@@ -61,16 +61,109 @@ %22%22%22%0A%0A + def __pos__(self):%0A return self%0A%0A def __neg__(self):%0A return -1 * self%0A%0A def
f6c6ff376974f604b2b4a7b62ad28fd56a264c55
Add empty testing scaffolding.
test/test_exceptions.py
test/test_exceptions.py
#!/usr/bin/env python3 import pyglab.exceptions as ex
Python
0
@@ -48,8 +48,228 @@ s as ex%0A +import unittest as ut%0A%0Aclass TestBadRequest(ut.TestCase):%0A def test_throw(self):%0A pass%0A def test_statuscode(self):%0A pass%0A def test_message(self):%0A pass%0A def test_body(self):%0A pass%0A
b0581aef251a3bc03e1a49e89c3b214114e20828
add unit test
tests/test_migration.py
tests/test_migration.py
import os import asyncio from migration.migration import Migration import pytest from pytest_lamp import asyncserver import whisper def test_migration_example(): pass def test_extract_wsp(monkeypatch): worker = Migration('/opt/graphite/storage/whisper/zon', '127.0.0.1', 2003) def mock_return(path): yield ('/opt/graphite/storage/whisper/zon', [], ['where.wsp']) monkeypatch.setattr(os, 'walk', mock_return) relative_path, full_path = next(worker._extract_wsp()) assert relative_path == '/where.wsp' assert full_path == '/opt/graphite/storage/whisper/zon/where.wsp' @pytest.mark.asyncio @asyncserver('127.0.0.1', 2003) async def test_graphite_connect(): loop = asyncio.get_event_loop() worker = Migration('/opt/graphite/storage/whisper/zon', '127.0.0.1', 2003, loop=loop) await worker.connect_to_graphite() message = "pytest-lamp" reader = worker.graphite_conn._reader writer = worker.graphite_conn._writer writer.write(message.encode("ascii")) await writer.drain() writer.write_eof() await writer.drain() data = (await reader.read()).decode("utf-8") writer.close() assert message == data await worker.close_conn_to_graphite() @pytest.mark.asyncio async def test_read_from_wsps(monkeypatch): loop = asyncio.get_event_loop() worker = Migration('/opt/graphite/storage/whisper/zon', '127.0.0.1', 2003, loop=loop) def fetch_mock_return(path, i): return ((1483668388, 1483668392, 2), [7, 8]) monkeypatch.setattr(whisper, 'fetch', fetch_mock_return) def walk_mock_return(path): yield ('/opt/graphite/storage/whisper/zon', [], ['where.wsp']) monkeypatch.setattr(os, 'walk', walk_mock_return) await worker.read_from_wsps() num = worker.queue.qsize() # two datapoints and one terminator assert num == 3 data1 = await worker.queue.get() data2 = await worker.queue.get() terminator = await worker.queue.get() assert data1 == ('zon.where', 7, 1483668388) assert data2 == ('zon.where', 8, 1483668390) assert terminator == None @pytest.mark.asyncio @asyncserver('127.0.0.1', 2003) async def test_write_to_graphite(): loop = asyncio.get_event_loop() worker = Migration('/opt/graphite/storage/whisper/zon', '127.0.0.1', 2003, loop=loop) await worker.connect_to_graphite() # prefill some data into queue data = ('zon.test.metric', 7, 1483668388) await worker.queue.put(data) await worker.queue.put(None) # send data await worker.write_to_graphite() await worker.close_conn_to_graphite() @pytest.mark.asyncio async def test_send_one_wsp(monkeypatch): pass
Python
0.000001
@@ -2691,76 +2691,953 @@ )%0A%0A%0A -@pytest.mark.asyncio%0Aasync def test_send_one_wsp(monkeypatch):%0A pass +async def handler(reader, writer):%0A data = (await reader.read())%0A assert data == 'mondev 7 1483668388%5Cn'%0A writer.write(data)%0A await writer.drain()%0A writer.close()%0A%0A%0A@pytest.mark.asyncio%0Aasync def test_send_one_wsp(monkeypatch):%0A loop = asyncio.get_event_loop()%0A host = '127.0.0.1'%0A port = 2003%0A server = await asyncio.start_server(handler, host, port)%0A worker = Migration('/opt/graphite/storage/whisper/zon',%0A host, port, loop=loop)%0A await worker.connect_to_graphite()%0A def fetch_mock_return(path, i):%0A return ((1483668388, 1483668390, 2), %5B7%5D)%0A%0A monkeypatch.setattr(whisper, 'fetch', fetch_mock_return)%0A%0A def exist_mock_return(path):%0A return True%0A%0A monkeypatch.setattr(os.path, 'exists', exist_mock_return)%0A storage = '/zon/where'%0A metric = 'velocity'%0A new_metric = 'mondev'%0A%0A await worker.send_one_wsp(storage, metric, new_metric) %0A server.close() %0A
0a438f1d10e9e0e31e5c430f921b618b8a625154
Fix line endings on Windows
building.py
building.py
import subprocess import threading import os import sublime from .fuse_util import getFusePathFromSettings, getSetting from .log import log class BuildManager: def __init__(self, fuseNotFoundHandler): self.builds = {} self.fuseNotFoundHandler = fuseNotFoundHandler self.previousBuildCommand = None def preview(self, target, path): fusePath = getFusePathFromSettings() start_preview = [fusePath, "preview", "--target=" + target, "--name=Sublime_Text_3", path] name = target.capitalize() + " Preview" self._start(target, start_preview, name, None) def build(self, target, run, working_dir, error_handler): platform = str(sublime.platform()) if self._isUnsupported(platform, target): error_handler(target + " builds are not available on " + platform + ".") return name = target.capitalize() + " Build" cmd = self._tryCreateBuildCommand(target, run) if not cmd: error_handler("No Fuse build target set.\n\nGo to Tools/Build With... to choose one.\n\nFuture attempts to build will use that.") return self.previousBuildCommand = cmd self._start(target, cmd, name, working_dir) def _tryCreateBuildCommand(self, target, run): log().info(target) if target != "Default": return [getFusePathFromSettings(), "build", "-t=" + target, "-c=Release"] + (["-r"] if run else []) if self.previousBuildCommand: return self.previousBuildCommand return None def _start(self, target, cmd, name, working_dir): if name in self.builds: self.builds[name].stop() build = BuildInstance(cmd, name, working_dir, self.fuseNotFoundHandler) self.builds[name] = build build.start() def _isUnsupported(self, platform, target): unsupported = { "windows" : [ "ios", "cmake"], "osx" : ["dotnet", "msvc"] } return platform.lower() in unsupported and target.lower() in unsupported[platform] class BuildInstance(threading.Thread): def __init__(self, cmd, title, working_dir, fuseNotFoundHandler): threading.Thread.__init__(self) self.cmd = cmd self.daemon = True self.output = OutputView(title) if getSetting("fuse_show_build_results") else NullOutputView() self.fuseNotFoundHandler = fuseNotFoundHandler self.process = None self.working_dir = working_dir def run(self): log().info("Opening subprocess %s", str(self.cmd)) try: creationflags = 0x08000000 if os.name == "nt" else 0 self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=creationflags, cwd=self.working_dir) except: self.fuseNotFoundHandler() self.output.close() return for line in iter(self.process.stdout.readline,b''): self.output.append(line.decode("utf-8")) self.process.wait() def stop(self): if self.process: try: self.process.kill() except ProcessLookupError: pass #It died by itself, which is fine self.output.close() class OutputView: def __init__(self, title): self.title = title window = sublime.active_window() self.view = window.new_file() self.view.set_scratch(True) self.view.set_name(title) def append(self, line): self.view.run_command("append", {"characters": line}) def close(self): try: window = self.view.window() groupIndex, viewIndex = window.get_view_index(self.view) window.run_command("close_by_index", { "group": groupIndex, "index": viewIndex }) except: pass #Failing to close a tab is not critical class NullOutputView: def append(self, line): pass def close(self): pass
Python
0.000071
@@ -2664,16 +2664,33 @@ %22utf-8%22) +.replace('%5Cr','') )%0A%09%09self
f13982144a2a0710af8e082dd01d73f036f026fd
Use clean_system fixture on pypackage test
tests/test_pypackage.py
tests/test_pypackage.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_pypackage -------------- Tests formerly known from a unittest residing in test_generate.py named TestPyPackage.test_cookiecutter_pypackage """ from __future__ import unicode_literals import os import subprocess import pytest from cookiecutter import utils from tests.skipif_markers import skipif_travis, skipif_no_network @pytest.fixture(scope='function') def remove_additional_dirs(request): """ Remove special directories which are creating during the tests. """ def fin_remove_additional_dirs(): if os.path.isdir('cookiecutter-pypackage'): utils.rmtree('cookiecutter-pypackage') if os.path.isdir('boilerplate'): utils.rmtree('boilerplate') request.addfinalizer(fin_remove_additional_dirs) @skipif_travis @skipif_no_network @pytest.mark.usefixtures('remove_additional_dirs') def test_cookiecutter_pypackage(): """ Tests that https://github.com/audreyr/cookiecutter-pypackage.git works. """ proc = subprocess.Popen( 'git clone https://github.com/audreyr/cookiecutter-pypackage.git', stdin=subprocess.PIPE, shell=True ) proc.wait() proc = subprocess.Popen( 'cookiecutter --no-input cookiecutter-pypackage/', stdin=subprocess.PIPE, shell=True ) proc.wait() assert os.path.isdir('cookiecutter-pypackage') assert os.path.isfile('boilerplate/README.rst')
Python
0
@@ -867,16 +867,32 @@ ixtures( +'clean_system', 'remove_
55bc60570fec684905132c4899c1830cbfed428c
add the missing setup for logging
volttron/platform/vip/agent/subsystems/health.py
volttron/platform/vip/agent/subsystems/health.py
# Copyright (c) 2015, Battelle Memorial Institute # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation # are those of the authors and should not be interpreted as representing # official policies, either expressed or implied, of the FreeBSD # Project. # # This material was prepared as an account of work sponsored by an # agency of the United States Government. Neither the United States # Government nor the United States Department of Energy, nor Battelle, # nor any of their employees, nor any jurisdiction or organization that # has cooperated in the development of these materials, makes any # warranty, express or implied, or assumes any legal liability or # responsibility for the accuracy, completeness, or usefulness or any # information, apparatus, product, software, or process disclosed, or # represents that its use would not infringe privately owned rights. # # Reference herein to any specific commercial product, process, or # service by trade name, trademark, manufacturer, or otherwise does not # necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors # expressed herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY # operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 import logging import os import weakref from volttron.platform.messaging import topics from volttron.platform.messaging.health import * from .base import SubsystemBase __docformat__ = 'reStructuredText' __version__ = '1.0' """ The health subsystem allows an agent to store it's health in a non-intrusive way. """ _log = logging.getLogger(__name__) class Health(SubsystemBase): def __init__(self, owner, core, rpc): self._owner = owner self._core = weakref.ref(core) self._rpc = weakref.ref(rpc) self._statusobj = Status.build( STATUS_GOOD, status_changed_callback=self._status_changed) def onsetup(sender, **kwargs): rpc.export(self.set_status, 'health.set_status') rpc.export(self.get_status, 'health.get_status') rpc.export(self.send_alert, 'health.send_alert') core.onsetup.connect(onsetup, self) def send_alert(self, alert_key, statusobj): """ An alert_key is a quasi-unique key. A listener to the alert can determine whether to pass the alert on to a higher level based upon the frequency of this alert. :param alert_key: :param context: :return: """ _log.debug("In send alert") if not isinstance(statusobj, Status): raise ValueError('statusobj must be a Status object.') agent_class = self._owner.__class__.__name__ agent_uuid = os.environ.get('AGENT_UUID', '') _log.debug("agent class {}".format(agent_class)) _log.debug("agent uuid {}".format(agent_uuid)) topic = topics.ALERTS(agent_class=agent_class, agent_uuid=agent_uuid) headers = dict(alert_key=alert_key) _log.debug("Headers before sending alert {}".format(headers)) self._owner.vip.pubsub.publish("pubsub", topic=topic.format(), headers=headers, message=statusobj.to_json()) def _status_changed(self): """ Internal function that happens when the status changes state. :return: """ self._owner.vip.heartbeat.restart() def set_status(self, status, context=None): """RPC method Updates the agents status to the new value with the specified context. :param: status: str: GODD, BAD :param: context: str: A serializable that denotes the context of status. """ self._statusobj.update_status(status, context) def get_status(self): """"RPC method Returns the last updated status from the object with the context. The minimum output from the status would be: { "status": "GOOD", "context": None, "utc_last_update": "2016-03-31T15:40:32.685138+0000" } """ return self._statusobj.to_json()
Python
0.000001
@@ -2850,16 +2850,58 @@ eakref%0A%0A +from volttron.platform.agent import utils%0A from vol @@ -3167,16 +3167,38 @@ ay.%0A%22%22%22%0A +utils.setup_logging()%0A _log = l @@ -3224,16 +3224,44 @@ name__)%0A +_log.setLevel(logging.DEBUG) %0A%0Aclass
07e8fada20776f8b67a97e2487d6ec36b2cac1da
unified the error norm
src/analysis.py
src/analysis.py
from __future__ import division import random import math import numpy as np from scipy import linalg from matplotlib.pyplot import plot, subplot, legend import example2sys as e2s import pod import time def runAnalysis(n, k, N=1, example='butter', T=20, sigma=1., integrator='dopri5'): sys = e2s.example2sys(example + '_' + str(n) + '.mat') rsys = sys.truncate(k) results = [] reducedResults = [] controls = [] for i in range(N): Y, Yhat, U = randomRuns(sys, rsys, T, sigma, integrator=integrator) results.append(Y) reducedResults.append(Yhat) controls.append(U) error = [math.fabs(y-yhat) for y, yhat in zip(Y, Yhat)] subplot(3, 1, 1) plotResults(error, T, label='error '+str(i+1)) subplot(3, 1, 2) plotResults(Y, T, label='Y '+str(i+1)) plotResults(Yhat, T, label='Yhat '+str(i+1)) subplot(3, 1, 3) plotResults(U, T, label='U '+str(i+1)) #return Y, Yhat, T, U def plotResults(Y, T, label=None, legend_loc='upper left', show_legend=False): timeSteps = range(1, T+1) plot(timeSteps, Y, label=label) if show_legend: legend(loc=legend_loc) def randomRuns(sys, rsys, T, sigma=10.0, integrator='dopri5'): timeSteps = range(1, T+1) U = [np.array([random.gauss(0.,sigma)]) for t in timeSteps] sys.setupODE(integrator=integrator) rsys.setupODE(integrator=integrator) print('System of order {}'.format(sys.order)) with Timer(): Y = sys(timeSteps, U) print('System of order {}'.format(rsys.order)) with Timer(): Yhat = rsys(timeSteps, U) return Y, Yhat, U class Timer(object): """Allows some basic profiling""" def __enter__(self): self.start = time.time() return self def __exit__(self, ty, val, tb): end = time.time() self.elapsed = end - self.start print('Time elapsed {} seconds'.format(self.elapsed)) return False def optionPricingComparison(N=1000, k=None, option="put", r=0.05, T=1., K=100., L=None): if k is None: k = max(1,int(N/50)) print "SETUP\n====================" print "original system" with Timer(): sys = e2s.optionPricing(N=N, option=option, r=r, T=T, K=K, L=L) print "auto truncated" with Timer(): sys_auto_truncated = \ pod.lss(sys, reduction="truncation_square_root_trans_matrix") sys_auto_truncated.x0 = np.dot(sys_auto_truncated.Ti, sys.x0) print "balanced truncated with k =", k with Timer(): sys_balanced_truncated = \ pod.lss(sys, reduction="truncation_square_root_trans_matrix", k=k) sys_balanced_truncated.x0 = np.dot(sys_balanced_truncated.Ti, sys.x0) print "controllability gramian reduction" with Timer(): sys_control_truncated = \ pod.lss(sys, reduction="controllability_truncation", k=k) sys_control_truncated.x0 = np.dot(sys_control_truncated.Ti, sys.x0) print "============\nEVALUATIONS\n===============" timeSteps = list(np.linspace(0, 1, 100)) print "unreduced system" with Timer(): Y = sys(timeSteps) print "system reduced with balanced truncation, auto sized" with Timer(): Y_auto_truncated = sys_auto_truncated(timeSteps) print 'system reduced with balanced truncation, k={}'.format(k) with Timer(): Y_balanced_truncated = sys_balanced_truncated(timeSteps) print "system reduced with controllability gramian" with Timer(): Y_control_truncated = sys_control_truncated(timeSteps) eps_auto_truncated = [linalg.norm(y-yhat, ord=np.inf) for y, yhat in zip(Y, Y_auto_truncated)] eps_balanced_truncated = [linalg.norm(y-yhat, ord=np.inf) for y, yhat in zip(Y, Y_balanced_truncated)] eps_control_truncated = [linalg.norm(y-yhat, ord=np.inf) for y, yhat in zip(Y, Y_control_truncated)] print "The original system has order ", sys.order print "The auto-sized system has order ", sys_auto_truncated.order print "and a total error of ", max(eps_auto_truncated) print "The balanced and truncated system has order ", \ sys_balanced_truncated.order print "and a total error of ", max(eps_balanced_truncated) print "The control truncated system has order ", sys_control_truncated.order print "and a total error of ", max(eps_control_truncated) raise Exception
Python
0.998442
@@ -3624,24 +3624,49 @@ timeSteps)%0A%0A + norm_order = np.inf%0A%0A eps_auto @@ -3696,37 +3696,41 @@ rm(y-yhat, ord=n -p.inf +orm_order )%0A @@ -3856,37 +3856,41 @@ rm(y-yhat, ord=n -p.inf +orm_order )%0A @@ -4035,21 +4035,25 @@ t, ord=n -p.inf +orm_order )%0A
247e8f8ce8ed4677c629affec6b9a291c730e3a2
Use assert_equal instead of assertEqual in fail testcase.
tests/testcases/fail.py
tests/testcases/fail.py
from systest import TestCase class FailTest(TestCase): """A test that always fails. """ count = 0 def __init__(self, name): super(FailTest, self).__init__() self.name = "fail_" + name def run(self): FailTest.count += 1 self.assertEqual(1, 0)
Python
0
@@ -286,9 +286,10 @@ sert -E +_e qual
3c2d290452a07946880fc25af917b32766f9529d
Update test script to include deposit
testsuite/front_test.py
testsuite/front_test.py
#!/usr/bin/env python2 import gevent import requests import json import time import hashlib ip_address = "vm" port = "3000" url = ''.join(['http://', ip_address, ':', port]) def secret(params, secret): keys = params.keys() keys.sort() hash_str = "" for key in keys: hash_str += (params[key]) md5 = hashlib.md5() md5.update(hash_str) return md5.hexdigest() def test_login(login): info = login assert 'username' in info assert 'secret' in info assert len(info.keys()) == 2 def pytest_funcarg__login(request): r = requests.get(url + '/register') assert r.status_code == 200 info = json.loads(r.text) assert 'username' in info assert 'secret' in info return info def test_balance(login): body = {} body['username'] = login['username'] body['time'] = str(time.time()) body['sign'] = secret(body, login['secret']) r = requests.post(url + '/balance', data=json.dumps(body)) print r.text assert r.status_code == 200 info = json.loads(r.text) assert 'balance' in info
Python
0
@@ -968,25 +968,8 @@ y))%0A - print r.text%0A @@ -1056,8 +1056,443 @@ n info%0A%0A +def test_deposit(login):%0A body = %7B%7D%0A body%5B'username'%5D = login%5B'username'%5D%0A body%5B'time'%5D = str(time.time())%0A body%5B'sign'%5D = secret(body, login%5B'secret'%5D)%0A r = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)%0A assert r.status_code == 200%0A info = json.loads(r.text)%0A assert 'address' in info%0A r2 = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)%0A assert r2.text == r.text%0A%0A
50c4b312a27b61725885ee84d45a0f07f94a8ec6
Handle interactive-on-error also when error is from contextmanager exit.
teuthology/run_tasks.py
teuthology/run_tasks.py
import sys import logging log = logging.getLogger(__name__) def _run_one_task(taskname, **kwargs): submod = taskname subtask = 'task' if '.' in taskname: (submod, subtask) = taskname.rsplit('.', 1) parent = __import__('teuthology.task', globals(), locals(), [submod], 0) mod = getattr(parent, submod) fn = getattr(mod, subtask) return fn(**kwargs) def run_tasks(tasks, ctx): stack = [] try: for taskdict in tasks: try: ((taskname, config),) = taskdict.iteritems() except ValueError: raise RuntimeError('Invalid task definition: %s' % taskdict) log.info('Running task %s...', taskname) manager = _run_one_task(taskname, ctx=ctx, config=config) if hasattr(manager, '__enter__'): manager.__enter__() stack.append(manager) except Exception, e: ctx.summary['success'] = False if 'failure_reason' not in ctx.summary: ctx.summary['failure_reason'] = str(e) log.exception('Saw exception from tasks') if ctx.config.get('interactive-on-error'): from .task import interactive log.warning('Saw failure, going into interactive mode...') interactive.task(ctx=ctx, config=None) finally: try: exc_info = sys.exc_info() while stack: manager = stack.pop() log.debug('Unwinding manager %s', manager) try: suppress = manager.__exit__(*exc_info) except Exception, e: ctx.summary['success'] = False if 'failure_reason' not in ctx.summary: ctx.summary['failure_reason'] = str(e) log.exception('Manager failed: %s', manager) if exc_info == (None, None, None): # if first failure is in an __exit__, we don't # have exc_info set yet exc_info = sys.exc_info() else: if suppress: sys.exc_clear() exc_info = (None, None, None) if exc_info != (None, None, None): log.debug('Exception was not quenched, exiting: %s: %s', exc_info[0].__name__, exc_info[1]) raise SystemExit(1) finally: # be careful about cyclic references del exc_info
Python
0
@@ -2077,32 +2077,296 @@ = sys.exc_info() +%0A%0A if ctx.config.get('interactive-on-error'):%0A from .task import interactive%0A log.warning('Saw failure, going into interactive mode...')%0A interactive.task(ctx=ctx, config=None) %0A
e8eaa2e4bd6cc7fe51e3c4c15a5bf392a24d5b92
generate index
hodge/__init__.py
hodge/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import io import shutil import click from cookiecutter.main import cookiecutter from slugify import slugify from jinja2 import Template, Environment, FileSystemLoader from datetime import datetime import markdown2 from .utils import walk_dir @click.group() def cmds(): pass @cmds.command() @click.argument('site_name', type=str) def init(site_name): click.echo(u'Hodge init new project...') repo_name = site_name.lower().replace(' ', '-') app_name = repo_name.replace("-", "") extra_context = { 'site_name': site_name, "repo_name": repo_name, "app_name": app_name, "pkg_name": app_name, "content_folder": "content" } cookiecutter( 'https://github.com/avelino/hodge-init.git', extra_context=extra_context, no_input=True) @cmds.command() def newpost(): if not os.path.isfile("./hodge.toml"): click.echo(u'hodge.toml (config) not exist!') exit(0) click.echo(u'Hodge new post create...') date = datetime.now() obj = {} obj["title"] = click.prompt('Title', type=str) slug = slugify(obj["title"]) obj["slug"] = click.prompt('Slug', type=str, default=slug) obj["tags"] = click.prompt('Tags (hodge, static)', type=str, default=", ".join(obj["title"].split(" "))) obj["date"] = click.prompt( 'Date', type=str, default=date.strftime("%Y/%m/%d %H:%M:%S")) obj["file"] = click.prompt( "File name", type=str, default=date.strftime("%Y_%m_%d_{}.md".format(obj["slug"]))) template_path = os.path.join(os.path.dirname(__file__), "templates") tmp = Template(io.open(os.path.join(template_path, "newpost.md")).read()) if not os.path.isdir("./content"): os.mkdir("./content") with io.open("./content/{}".format(obj["file"]), "wb") as f: f.write(tmp.render(**obj).encode()) @cmds.command() def build(): if not os.path.isfile("./hodge.toml"): click.echo(u'hodge.toml (config) not exist!') exit(0) click.echo(u'Hodge build...') template_path = os.path.join("theme", "default") env = Environment(autoescape=True, loader=FileSystemLoader(template_path)) template = env.get_template('index.html') shutil.rmtree("./build", ignore_errors=True) for filename in walk_dir("./content"): text = io.open(filename, "rb").read() html = markdown2.markdown(text, extras=["metadata"]) meta = html.metadata content = {"content": html, "meta": meta} if not os.path.isdir("./build"): os.mkdir("./build") with open("./build/{}.html".format(meta.get("slug")), "w") as fh: fh.write(template.render(**content)) click.echo("- {}".format(meta.get("slug"))) def main(): cmds()
Python
0.999996
@@ -2311,16 +2311,24 @@ template +_content = env.g @@ -2344,13 +2344,15 @@ te(' -index +content .htm @@ -2406,16 +2406,32 @@ =True)%0A%0A + index = %5B%5D%0A%0A for @@ -2829,16 +2829,24 @@ template +_content .render( @@ -2911,16 +2911,230 @@ ug%22)))%0A%0A + index.append(content)%0A%0A template_index = env.get_template('index.html')%0A content = %7B%22posts%22: index%7D%0A with open(%22./build/index.html%22, %22w%22) as fh:%0A fh.write(template_index.render(**content))%0A%0A %0Adef mai
f6c0258e257aa537dbb64bb8c5f10c87ec32dcf9
Update my_hooks.py
hooks/my_hooks.py
hooks/my_hooks.py
#!/usr/bin/python """ Example Diaphora export hooks script. In this example script the following fake scenario is considered: 1) There is a something-user.i64 database, for user-land stuff. 2) There is a something-kernel.i64 database, for kernel-land stuff. 3) We export all functions from the something-user.i64 database. 4) We only export from something-kernel.i64 the syscall_* or sys_* prefixed functions. 5) In both databases there are constants referencing the build identifier but they are different for both databases: BUILD-1000 in the user-land part and BUILD-2000 in the kernel-land part. For making a perfect match based on the constants found in both databases, we change the strings BUILD-XXX to the generic string "BUILD-ID" for both databases. """ #----------------------------------------------------------------------- FUNC_PREFIXES = ["syscall_", "sys_"] BUILD_IDS = ["BUILD-1000", "BUILD-2000"] #----------------------------------------------------------------------- class CMyHooks: def __init__(self, diaphora_obj): """ @diaphora_obj is the CIDABinDiff object being used. """ self.diaphora = diaphora_obj self.db_name = self.diaphora.db_name def before_export_function(self, ea, func_name): """ @ea is the address of the function that is going to be read. Return True for the function to be read, or False to ignore it. """ # If the IDB name has the word 'user' on it, it's the user-land database for # which we want to export everything. if self.db_name.find("user") > -1: return True # Otherwise, it's the kernel-land IDB for which we only want to export the # syscall functions. if func_name: # It is a syscall if "syscall_%s" % func_name in SYSCALL_NAMES: return True return False def after_export_function(self, d): """ @d is a dictionary with everything exported by Diaphora for the current function. Transformations can be applied to the dictionary like changing some strings or constants or whatever else. The function must return a new dictionary with the modifications. """ # Search if any of the constants in the dictionary has the string "BUILD-*" # and, if so, change it in the export process to a generic "BUILD-ID" string # that will match more functions. for build_id in BUILD_IDS: for key in d: if type(d[key]) is str: if d[key].find(build_id) > -1: d[key] = d[key].replace(build_id, "GENERIC-BUILD-ID") return d HOOKS = {"DiaphoraHooks": CMyHooks}
Python
0.000001
@@ -1733,20 +1733,20 @@ # I -t is +s it a sysca @@ -1751,61 +1751,88 @@ call +? %0A -if %22syscall_%25s%22 %25 func_name in SYSCALL_NAMES:%0A +for prefix in FUNC_PREFIXES:%0A if func_name.startswith(prefix):%0A
4ab33cec7c0f4ee9fee7a7dce1c28466780b7074
Add hoomd.box.Box to main namespace
hoomd/__init__.py
hoomd/__init__.py
# Copyright (c) 2009-2019 The Regents of the University of Michigan # This file is part of the HOOMD-blue project, released under the BSD 3-Clause # License. """ HOOMD-blue python API :py:mod:`hoomd` provides a high level user interface for defining and executing simulations using HOOMD. .. rubric:: API stability :py:mod:`hoomd` is **stable**. When upgrading from version 3.x to 3.y (y > x), existing job scripts that follow *documented* interfaces for functions and classes will not require any modifications. **Maintainer:** Joshua A. Anderson """ # Maintainer: joaander import sys; import ctypes; import os; # need to import HOOMD with RTLD_GLOBAL in python sitedir builds if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ): flags = sys.getdlopenflags(); sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL); from hoomd import _hoomd; if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ): sys.setdlopenflags(flags); from hoomd import meta from hoomd import context from hoomd import cite from hoomd import analyze from hoomd import benchmark from hoomd import comm from hoomd import compute from hoomd import data from hoomd import dump from hoomd import group from hoomd import init from hoomd import integrate from hoomd import option from hoomd import update from hoomd import util from hoomd import variant from hoomd import lattice from hoomd import device try: from hoomd import md except ImportError: pass try: from hoomd import hpmc except ImportError: pass try: from hoomd import dem except ImportError: pass # TODO: enable this import after updating MPCD to the new API # try: # from hoomd import mpcd # except ImportError: # pass from hoomd.simulation import Simulation from hoomd.state import State from hoomd.operations import Operations from hoomd.snapshot import Snapshot from hoomd.logger import Logger from hoomd import tuner from hoomd._hoomd import WalltimeLimitReached; _default_excepthook = sys.excepthook; ## \internal # \brief Override pythons except hook to abort MPI runs def _hoomd_sys_excepthook(type, value, traceback): _default_excepthook(type, value, traceback); sys.stderr.flush(); if context.current.device is not None: _hoomd.abort_mpi(context.current.device.cpp_exec_conf); sys.excepthook = _hoomd_sys_excepthook __version__ = "{0}.{1}.{2}".format(*_hoomd.__version__)
Python
0.000003
@@ -1863,16 +1863,42 @@ Logger%0A +from hoomd.box import Box%0A from hoo
602ad51f9d3f9577bf805ca317ecf0dfb31b5994
Create config from ena ftp
conf.py
conf.py
import logging import multiprocessing import re import os import tempfile import yaml from collections import OrderedDict # Adapted from: https://github.com/pnnl/atlas/blob/master/atlas/conf.py # http://stackoverflow.com/a/3675423 def replace_last(source_string, replace_what, replace_with): head, _sep, tail = source_string.rpartition(replace_what) if _sep == '': return tail else: return head + replace_with + tail def get_sample_files(path): samples = OrderedDict() seen = set() for dir_name, sub_dirs, files in os.walk(path): print(dir_name, sub_dirs, files) for fname in files: if ".fastq" in fname or ".fq" in fname: sample_id = fname.partition(".fastq")[0] if ".fq" in sample_id: sample_id = fname.partition(".fq")[0] sample_id = sample_id.replace("_R1", "").replace("_r1", "").replace("_R2", "").replace("_r2", "") sample_id = re.sub("_1$", "", sample_id) sample_id = re.sub("_2$", "", sample_id) sample_id = sample_id.replace("_", "-").replace(" ", "-") fq_path = os.path.join(dir_name, fname) fastq_paths = [fq_path] if fq_path in seen: continue if "_R1" in fname or "_r1" in fname or "_1" in fname: fname = replace_last(fname,"_1.","_2.") r2_path = os.path.join(dir_name, fname.replace("_R1", "_R2").replace("_r1", "_r2")) if not r2_path == fq_path: seen.add(r2_path) fastq_paths.append(r2_path) if "_R2" in fname or "_r2" in fname or "_2" in fname: fname = replace_last(fname,"_2.","_1.") r1_path = os.path.join(dir_name, fname.replace("_R2", "_R1").replace("_r2", "_r1")) if not r1_path == fq_path: seen.add(r1_path) fastq_paths.insert(0, r1_path) if sample_id in samples: logging.warn("Duplicate sample %s was found after renaming; skipping..." % sample_id) continue samples[sample_id] = {'path': fastq_paths } return samples def make_config(config, path): """Write the file `config` and complete the sample names and paths for all files in `path`.""" represent_dict_order = lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data.items()) yaml.add_representer(OrderedDict, represent_dict_order) path = os.path.realpath(path) conf = OrderedDict() samples = get_sample_files(path) logging.info("Found %d samples under %s" % (len(samples), path)) conf["project"] = "My-Project" conf["adapters_fasta"] = "/data/ngs/adapters/contaminant_list.txt" conf["pandaseq_overlap"] = "10" conf["pandaseq_quality"] = "25" conf["pandaseq_minlength"] = "100" conf["pandaseq_maxlength"] = "700" conf["forward_primer"] = "CCTACGGGNGGCWGCAG" conf["reverse_primer"] = "GACTACHVGGGTATCTAATCC" conf["silva_arb"] = "/data/db/Silva/128/SSURef_NR99_128_SILVA_07_09_16_opt.arb" conf["mergepairs"] = "vsearch" conf["metadata"] = "../data/metadata.txt" conf["data"] = samples with open(config, "w") as f: print(yaml.dump(conf, default_flow_style=False), file=f) logging.info("Configuration file written to %s" % config) if __name__ == "__main__": make_config(config="config.yaml", path="../data/Clean/")
Python
0
@@ -189,16 +189,55 @@ onf.py%0A%0A +ena = True%0Ahost = %22ftp.sra.ebi.ac.uk%22%0A%0A # http:/ @@ -563,70 +563,222 @@ -for dir_name, sub_dirs, files in +walker = %22%22%0A if ena:%0A import ftputil%0A ftphost = ftputil.FTPHost(host, %22anonymous%22,%22%22)%0A walker = ftph os +t .walk( -path):%0A print( +%22vol1/ERA651/ERA651425/fastq/%22)%0A else:%0A walker = os.walk(path)%0A for dir_ @@ -798,17 +798,27 @@ s, files -) + in walker: %0A @@ -1406,16 +1406,78 @@ fname)%0A + if ena: fq_path = os.path.join(host, fq_path)%0A @@ -1785,24 +1785,90 @@ 1%22, %22_r2%22))%0A + if ena: r2_path = os.path.join(host, r2_path)%0A @@ -2123,24 +2123,24 @@ _2.%22,%22_1.%22)%0A - @@ -2227,24 +2227,90 @@ 2%22, %22_r1%22))%0A + if ena: r1_path = os.path.join(host, r1_path)%0A
53881ce561802c13fe66d73203333322a138b57c
fix sphinx pngmath bug
conf.py
conf.py
# -*- coding: utf-8 -*- # # scikit-rf-web documentation build configuration file, created by # sphinx-quickstart on Sat Dec 18 17:03 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import sphinx_bootstrap_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('sphinxext')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'scikit-rf' copyright = u'2016, the scikit-rf development team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build','.git','s','resources','attic','blog', 'code/lyxport/dist'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' #html_theme = 'sphinxdoc' #html_theme = 'agogo' # inherits from sphinxdoc and modifies it a little # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. #html_style = 'agogo.css' html_theme = 'bootstrap' html_theme_options = { 'source_link_position': "footer", 'bootswatch_theme': "flatly", 'navbar_sidebarrel': True, 'bootstrap_version': "3", } # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Only works with the default theme, makes the sidebar not scroll: #html_theme_options = { "stickysidebar": "true" } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['themes'] html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation".ke html_title = u"scikit-rf" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "scikit-rf" # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "scikit-rf-title-flat.svg" # The name of an image file (within the static path) to use as favicon of the # pixels large. html_favicon = "favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # TODO: split this up into several chunks #html_sidebars = { # '**': ['sidebar_versions.html', 'sidebar_links.html'], #} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = ''
Python
0.999877
@@ -1073,10 +1073,10 @@ ext. -pn +im gmat
4c9f32da941ae76d8eaf68f9673d7763af8c74f0
Create train directory if not exists
data.py
data.py
# -*- coding: utf-8 -*- import sugartensor as tf import numpy as np import pandas as pd import librosa import glob import os import string import itertools __author__ = 'buriburisuri@gmail.com' __vocabulary_save_dir__ = "asset/train/" class VCTK(object): def __init__(self, batch_size=16, data_path='asset/data/', vocabulary_loading=False): @tf.sg_producer_func def _load_mfcc(src_list): lab, wav = src_list # label, wave_file # decode string to integer lab = np.fromstring(lab, np.int) # load wave file wav, sr = librosa.load(wav, mono=True) # mfcc mfcc = librosa.feature.mfcc(wav, sr) # return result return lab, mfcc # path for loading just vocabulary if vocabulary_loading: vocabulary_file = __vocabulary_save_dir__ + self.__class__.__name__ + '_vocabulary.npy' if os.path.exists(vocabulary_file): self.index2byte = np.load(vocabulary_file) self.byte2index = {} for i, b in enumerate(self.index2byte): self.byte2index[b] = i self.voca_size = len(self.index2byte) tf.sg_info('VCTK vocabulary loaded.') return # load corpus labels, wave_files = self._load_corpus(data_path) # to constant tensor label = tf.convert_to_tensor(labels) wave_file = tf.convert_to_tensor(wave_files) # create queue from constant tensor label, wave_file = tf.train.slice_input_producer([label, wave_file], shuffle=True) # decode wave file label, mfcc = _load_mfcc(source=[label, wave_file], dtypes=[tf.sg_intx, tf.sg_floatx], capacity=128, num_threads=32) # create batch queue with dynamic pad batch_queue = tf.train.batch([label, mfcc], batch_size, shapes=[(None,), (20, None)], num_threads=32, capacity=batch_size*48, dynamic_pad=True) # split data self.label, self.mfcc = batch_queue # batch * time * dim self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1]) # calc total batch count self.num_batch = len(labels) // batch_size # print info tf.sg_info('VCTK corpus loaded.(total data=%d, total batch=%d)' % (len(labels), self.num_batch)) def _load_corpus(self, data_path): # read meta-info df = pd.read_table(data_path + 'speaker-info.txt', usecols=['ID', 'AGE', 'GENDER', 'ACCENTS'], index_col=False, delim_whitespace=True) # make file ID file_ids = [] for d in [data_path + 'txt/p%d/' % uid for uid in df.ID.values]: file_ids.extend([f[-12:-4] for f in sorted(glob.glob(d + '*.txt'))]) # make wave file list wav_files = [data_path + 'wav48/%s/' % f[:4] + f + '.wav' for f in file_ids] # exclude extremely short wave files file_id, wav_file = [], [] for i, w in zip(file_ids, wav_files): if os.stat(w).st_size > 240000: # at least 5 seconds file_id.append(i) wav_file.append(w) # read label sentence sents = [] for f in file_id: # remove punctuation, to lower, clean white space s = ' '.join(open(data_path + 'txt/%s/' % f[:4] + f + '.txt').read() .translate(None, string.punctuation).lower().split()) # append byte code sents.append([ord(ch) for ch in s]) # make vocabulary self.index2byte = [0] + list(np.unique(list(itertools.chain(*sents)))) # add <EMP> token self.byte2index = {} for i, b in enumerate(self.index2byte): self.byte2index[b] = i self.voca_size = len(self.index2byte) self.max_len = np.max([len(s) for s in sents]) # save vocabulary vocabulary_file = __vocabulary_save_dir__ + self.__class__.__name__ + '_vocabulary.npy' if not os.path.exists(vocabulary_file): np.save(vocabulary_file, self.index2byte) # byte to index label label = [] for s in sents: # save as string for variable-length support. label.append(np.asarray([self.byte2index[ch] for ch in s]).tostring()) return label, wav_file def print_index(self, indices): # transform label index to character for i, index in enumerate(indices): str_ = '' for ch in index: if ch > 0: str_ += unichr(self.index2byte[ch]) elif ch == 0: # <EOS> break print str_
Python
0.000001
@@ -4071,36 +4071,35 @@ vocabulary_ -file +dir = __vocabulary_ @@ -4126,32 +4126,353 @@ class__.__name__ +%0A if not os.path.exists(os.path.dirname(vocabulary_dir)):%0A try:%0A os.makedirs(os.path.dirname(vocabulary_dir))%0A except OSError as exc: # Guard against race condition%0A if exc.errno != errno.EEXIST:%0A raise%0A vocabulary_file = vocabulary_dir + '_vocabulary.
c7ecf728e12dd3a59f4ef45e30b61ce5c52ceca5
Fix corpus to Polish language
analysis/textclassification/bagofwords.py
analysis/textclassification/bagofwords.py
import functools from nltk import ngrams from nltk.collocations import BigramCollocationFinder from nltk.metrics import BigramAssocMeasures import nltk.corpus import re import definitions INVALID_TOKEN_PATTERN = r'^[!%"%\*\(\)\+,&#-\.\$/\d:;\?\<\>\=@\[\]].*' NEGATION_TOKEN_PATTERN = r'^nie$' def get_stopwords_list(): return list(nltk.corpus.stopwords.words('polish')) def filter_stopwords(words): polish_stopwords = get_stopwords_list() return [w for w in words if w not in polish_stopwords] def filter_custom_set(words, custom_set): r = re.compile(custom_set) words = list(filter(lambda w: not r.match(w), words)) return words def include_significant_bigrams(words, score_fn=BigramAssocMeasures.likelihood_ratio, n=100): bigram_finder = BigramCollocationFinder.from_words(words) bigrams = bigram_finder.nbest(score_fn, n) return list(words + bigrams) def get_all_lowercase(words): return [x.lower() for x in words] def get_bag_of_words(words): return dict([(word, True) for word in words]) def mark_negations(words): add_negation_suffix = False r_negation = re.compile(NEGATION_TOKEN_PATTERN) r_stopword = re.compile(INVALID_TOKEN_PATTERN) for index, item in enumerate(words): if (r_stopword.match(item)): add_negation_suffix = False continue if (r_negation.match(item)): add_negation_suffix = True continue if (add_negation_suffix): words[index] = words[index] + "_NEG" return words def get_processed_bag_of_words(text, lemmatizer, settings): words = nltk.tokenize.word_tokenize(text) words = get_all_lowercase(words) if lemmatizer is not None: words = [lemmatizer.get_lemma(word) for word in words] if (settings.FILTER_STOPWORDS): words = filter_stopwords(words) words = mark_negations(words) words = filter_custom_set(words, INVALID_TOKEN_PATTERN) if settings.MAX_FEATURES > 0: words = words[:settings.MAX_FEATURES] words = functools.reduce(lambda x, y: x + y, [words if n == 1 else list([' '.join(ngram) for ngram in ngrams(words, n)]) for n in range(1, settings.MAX_NGRAMS + 1)]) return get_bag_of_words(words)
Python
0.999998
@@ -1648,16 +1648,26 @@ ize(text +, 'polish' )%0A wo
de027652a4bb12c6d1a4cb7bc85448c8c2a0d321
use argparse to get arguments from command line
sortroms/__main__.py
sortroms/__main__.py
from sortroms import main if __name__ == '__main__': main()
Python
0.000001
@@ -18,16 +18,232 @@ ort main +%0Aimport argparse%0A%0Aparser = argparse.ArgumentParser(%0A%09description='Sort emulator ROM files',%0A%09prog='sortroms'%0A)%0A%0Aparser.add_argument(%0A%09'folder',%0A%09metavar='DIR',%0A%09type=str,%0A%09nargs='?',%0A%09help='The ROM folder to sort.'%0A) %0A%0Aif __n @@ -267,12 +267,44 @@ _':%0A +%09args = parser.parse_args()%0A %09main( +args )%0A