59b00a4f5cc5aa5139492660206c99185df24f7b
create unittest for area serializer for #191
Sinar/popit_ng,Sinar/popit_ng
popit/tests/test_area_api.py
popit/tests/test_area_api.py
from rest_framework.test import APITestCase
from rest_framework import status
from rest_framework.authtoken.models import Token
from popit.models import *


class AreaAPITestCase(APITestCase):

    fixtures = [ "api_request_test_data.yaml" ]

    def test_create_area_serializer(self):
        pass

    def test_fetch_area_serializer(self):
        client = self.client.get("/en/areas/b0c2dbaba8ea476f91db1e3c2320dcb7")

    def test_update_area_serializer(self):
        pass
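The fetch test above issues a request but asserts nothing about the response. A minimal sketch of a follow-up assertion for the same test class — the expected 200 status for this fixture is my assumption, not confirmed by the commit:

    def test_fetch_area_serializer_status(self):
        # hypothetical assertion; assumes the fixture contains this area id
        response = self.client.get("/en/areas/b0c2dbaba8ea476f91db1e3c2320dcb7")
        self.assertEqual(response.status_code, status.HTTP_200_OK)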
agpl-3.0
Python
a8274a5d5e4ec68f3ee594ffa741e90f11cf24db
Add tool to regenerate JSON files from P4 progs
p4lang/PI,p4lang/PI,p4lang/PI,p4lang/PI
tools/update_test_bmv2_jsons.py
tools/update_test_bmv2_jsons.py
#!/usr/bin/env python2

import argparse
import fnmatch
import os
import subprocess
import sys


def find_files(root):
    files = []
    for path_prefix, _, filenames in os.walk(root, followlinks=False):
        for filename in fnmatch.filter(filenames, '*.p4'):
            path = os.path.join(path_prefix, filename)
            json_path = os.path.splitext(path)[0] + ".json"
            if os.path.exists(json_path):
                files.append([path, json_path])
    return files


def check_compiler_exec(path):
    try:
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call([path, "--version"],
                                  stdout=devnull, stderr=devnull)
        return True
    except subprocess.CalledProcessError:
        return True
    except OSError:  # exec not found
        return False


def main():
    parser = argparse.ArgumentParser(
        description="Search for P4 files recursively in provided directory "
        "and if they have a JSON equivalent regenerates it using the bmv2 "
        "compiler.")
    parser.add_argument("--root", type=str, default=os.getcwd(),
                        help="Directory in which to recursively search for P4 "
                        "files. Default is current working directory.")
    parser.add_argument("--compiler", type=str, default="p4c-bmv2",
                        help="bmv2 compiler to use. Default is p4c-bmv2.")
    args = parser.parse_args()

    if not check_compiler_exec(args.compiler):
        print "Cannot use provided compiler"
        sys.exit(1)

    files = find_files(args.root)
    for input_f, output_f in files:
        print "Regenerating", input_f, "->", output_f
        try:
            cmd = [args.compiler, input_f, "--json", output_f,
                   "--keep-pragmas"]
            with open(os.devnull, 'w') as devnull:
                out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            print "ERROR"
            print " ".join(cmd)
            # check_output raised, so 'out' was never bound; the captured
            # output lives on the exception instead
            print e.output
        except OSError:
            print "FATAL ERROR"
            sys.exit(2)


if __name__ == '__main__':
    main()
apache-2.0
Python
f1d3717b45650244d9a4f44caf6f610636bb72ee
Add other_data_collections/2015ApJ...812...60B/biteau.py
gammapy/gamma-cat
other_data_collections/2015ApJ...812...60B/biteau.py
other_data_collections/2015ApJ...812...60B/biteau.py
""" Script to check and ingest Biteau & Williams (2015) data for gamma-cat. """ from astropy.table import Table class Biteau: def __init__(self): filename = 'other_data_collections/2015ApJ...812...60B/BiteauWilliams2015_AllData_ASDC_v2016_12_20.ecsv' self.table = Table.read(filename, format='ascii.ecsv', delimiter='|') def run_checks(self): # self.table.pprint() self.table.show_in_browser(jsviewer=True) self.table.info('stats') if __name__ == '__main__': biteau = Biteau() biteau.run_checks()
bsd-3-clause
Python
6f7fd163106ec5f4346eaaef04ed9726a3289801
add wrong reversesubstring problem solution
5outh/practice,5outh/practice
problems/reversesubstring.py
problems/reversesubstring.py
import sys

test = "aabbbbababaaabbab"

"""
Find
a) the first occurrence of b in string
b) the longest list of only as in string, store final index
"""

def solution(string):
    firstB = string.find('b')
    print ((string, firstB))
    if(firstB == -1):
        return (0, 0)
    longestA = 0
    longestAIndex = 0
    currentA = 0
    currentAIndex = 0
    for i in range(firstB, len(string)):
        if (string[i] == 'a'):
            print ("found a", str(i))
            currentAIndex = i
            currentA += 1
            if(currentA > longestA):
                longestA = currentA
                longestAIndex = currentAIndex
        if(string[i] == 'b'):
            currentA = 0
    return (firstB, longestAIndex)

if __name__ == '__main__':
    if (len(sys.argv) > 1):
        print(solution(sys.argv[1]))
    else:
        print(solution(test))
mit
Python
9ff1b6ffa297199dc73042382c369fc7af0813fc
Create stress_test1.py
MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab
home/moz4r/Test/stress_test1.py
home/moz4r/Test/stress_test1.py
# stress test
# (Runtime and python are globals provided by the MyRobotLab Jython
# environment; this script is not standalone Python.)
from time import sleep
import random

leftPort = "COM3"

i01 = Runtime.createAndStart("i01", "InMoov")
sleep(1)
i01.startMouth()
i01.startHead(leftPort)
i01.startLeftHand(leftPort)
i01.head.jaw.map(0,180,85,110)
i01.startMouthControl(leftPort)
i01.leftHand.thumb.setVelocity(random.randint(100,300))

MoveRandomTimer = Runtime.start("MoveRandomTimer","Clock")

def MoveRandom(timedata):
    i01.leftHand.thumb.moveTo(random.randint(50,130))
    MoveRandomTimer.setInterval(random.randint(10000,11000))
    i01.mouth.speak("voice test voice test")

MoveRandomTimer.addListener("pulse", python.name, "MoveRandom")
MoveRandomTimer.startClock()
apache-2.0
Python
a183922bd275414259800e75fd78db980604fa20
create thread3
wangwei7175878/tutorials
threading/thread3_join.py
threading/thread3_join.py
import threading
import time

def thread_job():
    print('T1 start\n')
    for i in range(10):
        time.sleep(0.1)
    print('T1 finish\n')

def T2_job():
    print('T2 start\n')
    print('T2 finish\n')

def main():
    added_thread = threading.Thread(target=thread_job, name='T1')
    thread2 = threading.Thread(target=T2_job, name='T2')
    added_thread.start()
    thread2.start()
    thread2.join()
    added_thread.join()
    print('all done\n')

if __name__ == '__main__':
    main()
mit
Python
143dbdb6d0d9840c4991eadbb2f5459398a6ddae
Add a 'cache' which only caches ETOPO1 files.
tilezen/joerd,mapzen/joerd
joerd/store/cache.py
joerd/store/cache.py
from contextlib import contextmanager  # needed for the upload_dir decorator below

from joerd.mkdir_p import mkdir_p
from joerd.plugin import plugin
from os import link
import os.path


class CacheStore(object):
    """
    Every tile that gets generated requires ETOPO1. Rather than re-download
    it every time (it's 446MB), we cache that file only.

    This is a bit of a hack, and would be better replaced by a generic
    fixed-size LRU/LFU cache. Even better if the cache could be shared
    between multiple Joerd processes on the same host.
    """

    def __init__(self, cfg):
        # NOTE: store_type and tmpdir (used below) are not defined in this
        # file as committed; they presumably come from the store config and
        # a joerd temp-dir helper respectively.
        create_fn = plugin('store', store_type, 'create')
        self.store = create_fn('store', cfg['store'])
        self.cache_dir = cfg['cache_dir']

    def upload_all(self, d):
        self.store.upload_all(d)

    @contextmanager
    def upload_dir(self):
        with tmpdir() as t:
            yield t
            self.upload_all(t)

    def exists(self, filename):
        return self.store.exists(filename)

    def get(self, source, dest):
        if 'ETOPO1' in source:
            cache_path = os.path.join(self.cache_dir, source)
            if not os.path.exists(cache_path):
                mkdir_p(os.path.dirname(cache_path))
                self.store.get(source, cache_path)
            # hard link to dest. this makes it non-portable, but means that
            # we don't have to worry about whether GDAL supports symbolic
            # links, and we don't have to worry about deleting files, as they
            # are reference counted by the OS.
            link(cache_path, dest)
        else:
            self.store.get(source, dest)


def create(cfg):
    return CacheStore(cfg)
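A rough sketch of the configuration shape CacheStore appears to expect — the keys are inferred from __init__ above, and every concrete value here is invented for illustration, not taken from the joerd docs:

# hypothetical joerd cache store config (keys inferred from __init__)
cfg = {
    'cache_dir': '/var/cache/joerd',            # where the cached ETOPO1 file is kept
    'store': {'type': 's3', 'bucket': 'dems'},  # forwarded to the wrapped store plugin
}
store = create(cfg)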
mit
Python
679ae2966f44a071630934c7b7d9eeb550a59223
Create balance_array.py
gnhuy91/python-utils
balance_array.py
balance_array.py
'''
`Balance Array`
Find i in array A where:
    A[1] + A[2]...A[i-1] = A[i+1] + A[i+2]...A[len(A)]

Write a `balanceSum` function which take an integer array as input, it should
return the smallest i, where i is an index in the array such that the sum of
elements to its left is equal to the sum of elements to its right.
Note: There always exist a solution.
'''

'''
TODO: use `pytest` or the likes to run tests more easily.
'''


def balanceSum(A):
    # Slow performance, need optimization
    # Iterate from 1->N-1 instead of 0->N or 1->N+1, b/c the `balance` index
    # can not be 0 or N, checking for them is pointless.
    # Also iterate from 1->N-1 is obviously faster than 0->N or 1->N+1.
    for i in range(1, len(A)):
        left_sum = sum(A[:i-1])
        right_sum = sum(A[i:])
        if left_sum == right_sum:
            return i
    return None


def balanceSum2(A):
    # currently is wrong
    left_sum, right_sum = 0, sum(A)
    for i, value in enumerate(A):
        i += 1
        if left_sum == right_sum:
            return i
        left_sum += A[i-1]
        right_sum -= A[i]
        print i, left_sum, right_sum
    return None


def test_one(func):
    inp = [4,1,2,3,3]
    out = 3
    if out != func(inp):
        return False
    return True


def test_two(func):
    inp = [3,1,2,1]
    out = 2
    if out != func(inp):
        return False
    return True


def test_three(func):
    inp = [3,1,3,1]
    out = 2
    if out != func(inp):
        return False
    return True


def main():
    test_func = balanceSum
    print test_one(test_func)
    print test_two(test_func)
    print test_three(test_func)


if __name__ == '__main__':
    import sys
    sys.exit(int(main() or 0))
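The file flags balanceSum as slow and balanceSum2 as wrong. For reference, a single-pass prefix-sum sketch that returns the same 1-based index as balanceSum above — balanceSumLinear is my name, not from the repo:

def balanceSumLinear(A):
    # O(n): walk once, keeping the running sum left of the balance index.
    total = sum(A)
    left = 0  # equals sum(A[:i-1]) as i advances
    for i in range(1, len(A)):
        pivot = A[i-1]  # the balance element itself, excluded from both sides
        if left == total - left - pivot:
            return i
        left += pivot
    return None

For example, balanceSumLinear([3, 1, 2, 1]) returns 2, matching test_two, since 3 == 2 + 1.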
mit
Python
19dd8b925b188bc09eb85952db1f9f11db4c570e
add batch pics
congminghaoxue/learn_python
batch_cut_pic.py
batch_cut_pic.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# function: cut images to resize them in batch

import os
import os.path
import sys, getopt, argparse
from PIL import Image
from change_pic_size_by_cut import CutImage


def main():
    argc = len(sys.argv)
    cmdargs = str(sys.argv)

    parser = argparse.ArgumentParser(description="Tool for batch cut the image")
    parser.add_argument('-f', '--fromdir', required=True,
                        help='the directory path of the input file')
    parser.add_argument('-H', '--height', type=int, required=True,
                        help='height of the output file')
    parser.add_argument('-W', '--width', type=int, required=True,
                        help='width of the output file')
    parser.add_argument('-d', '--outdir', required=True,
                        help='the directory of the output file')
    parser.add_argument('-T', '--type', required=False,
                        help='the type of the output file: jpeg, gif, png, etc')
    args = parser.parse_args()

    fromdir = args.fromdir
    outdir = args.outdir
    width = args.width
    height = args.height
    if args.type == None:
        type = 'png'
    else:
        type = args.type

    for file in os.listdir(fromdir):
        if file == "desktop.ini":
            continue
        filein = os.path.join(fromdir, file)
        fileout = os.path.join(outdir, file)
        try:
            CutImage(filein, fileout, width, height, type)
        except Exception as e:
            print(e)
            continue


if __name__ == '__main__':
    main()
apache-2.0
Python
a4c71bcefa255e3f2ec4fcbcd77e614669190250
Set up change set delta lambda
PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure
cd/lambdas/change-set-delta-notification/lambda_function.py
cd/lambdas/change-set-delta-notification/lambda_function.py
# Invoked by: CodePipeline
# Returns: Error or status message
#
# Publishes messages about deltas between a CloudFormation stack change set and
# the current version of the stack. The stack parameter values for both the
# stack and change set are queried, compared, and the differences are sent as a
# message to the Slack relay.
#
# This should always callback to the CodePipeline API to indicate success or
# failure.

import boto3
import traceback
import json
import re
import os

code_pipeline = boto3.client('codepipeline')
cloudformation = boto3.client('cloudformation')
sns = boto3.client('sns')


def put_job_success(job, message):
    print('Putting job success')
    print(message)
    code_pipeline.put_job_success_result(jobId=job['id'])


def put_job_failure(job, message):
    print('Putting job failure')
    print(message)
    code_pipeline.put_job_failure_result(
        jobId=job['id'],
        failureDetails={'message': message, 'type': 'JobFailed'})


def parameters_delta_attachment(user_parameters):
    stack_name = user_parameters['StackName']
    change_set_name = user_parameters['ChangeSetName']

    # Get current stack parameter values
    stack = cloudformation.describe_stacks(StackName=stack_name)['Stacks'][0]
    stack_parameters = stack['Parameters']

    # Get new parameter values from change set
    change_set = cloudformation.describe_change_set(
        ChangeSetName=change_set_name,
        StackName=stack_name
    )
    change_set_parameters = change_set['Parameters']

    # Combine parameters from stack and change set
    parameters = {}
    for p in stack_parameters:
        if not p['ParameterKey'] in parameters:
            parameters[p['ParameterKey']] = {}
        parameters[p['ParameterKey']]['StackValue'] = p['ParameterValue']
    for p in change_set_parameters:
        if not p['ParameterKey'] in parameters:
            parameters[p['ParameterKey']] = {}
        parameters[p['ParameterKey']]['ChangeSetValue'] = p['ParameterValue']

    # Find values that have changed, and build strings that will be included in
    # the Slack message describing the changes
    deltas = []
    for k, v in parameters.items():
        if k == 'PipelineExecutionNonce':
            continue
        elif 'StackValue' not in v:
            deltas.append(f"*{k}*: ❔ ➡ `{v['ChangeSetValue']}`")
        elif 'ChangeSetValue' not in v:
            deltas.append(f"*{k}*: `{v['StackValue']}` ➡ ❌")
        elif v['StackValue'] != v['ChangeSetValue']:
            before = v['StackValue']
            after = v['ChangeSetValue']
            if re.search(r'EcrImageTag', k) or re.search(r'GitCommit', k):
                base = 'https://github.com/PRX'
                slug = k.replace('EcrImageTag', '').replace('GitCommit', '')
                repo = f'{slug}.prx.org'.replace('Infrastructure.prx.org', 'Infrastructure')
                url = f'{base}/{repo}/compare/{before}...{after}'
                deltas.append(f"*{k}*: `{before}` ➡ `<{url}|{after}>`")
            else:
                deltas.append(f"*{k}*: `{before}` ➡ `{after}`")

    unchanged_count = len(parameters) - len(deltas)

    return {
        'title': 'Stack Parameters Delta',
        'footer': f'Excludes {unchanged_count} unchanged parameters',
        'mrkdwn_in': ['text'],
        'text': '\n'.join(deltas)
    }


def slack_message(notification):
    return {
        'channel': '#ops-deploys',
        'username': 'AWS CodePipeline',
        'icon_emoji': ':ops-codepipeline:',
        'attachments': [
            parameters_delta_attachment(notification)
        ]
    }


def sns_message(notification):
    return json.dumps(slack_message(notification))


def lambda_handler(event, context):
    try:
        print('Posting delta notification...')

        job = event['CodePipeline.job']
        cfg = job['data']['actionConfiguration']['configuration']
        user_parameters = json.loads(cfg['UserParameters'])

        sns.publish(
            TopicArn=os.environ['SLACK_MESSAGE_RELAY_TOPIC_ARN'],
            Message=sns_message(user_parameters)
        )

        # Cleanup
        put_job_success(job, '')

        return '...Done'

    except Exception as e:
        print('Function failed due to exception.')
        print(e)
        traceback.print_exc()
        put_job_failure(job, 'Function exception: ' + str(e))
mit
Python
f36a0d1d53b4a15d8ead51a54260946f293a8718
add mac free memory script
superhj1987/mac_useful_things,Suninus/mac_useful_things,Suninus/mac_useful_things,superhj1987/mac_useful_things
mac_free.py
mac_free.py
#!/usr/bin/python
'''
Created on Jun 1, 2014

@author: jay
'''

import subprocess
import re

# Get process info
ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'],
                      stdout=subprocess.PIPE).communicate()[0]
vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]

# Iterate processes
processLines = ps.split('\n')
sep = re.compile('[\s]+')
rssTotal = 0  # kB
for row in range(1, len(processLines)):
    rowText = processLines[row].strip()
    rowElements = sep.split(rowText)
    try:
        rss = float(rowElements[0]) * 1024
    except:
        rss = 0  # ignore...
    rssTotal += rss

# Process vm_stat
vmLines = vm.split('\n')
sep = re.compile(':[\s]+')
vmStats = {}
for row in range(1, len(vmLines)-2):
    rowText = vmLines[row].strip()
    rowElements = sep.split(rowText)
    vmStats[(rowElements[0])] = int(rowElements[1].strip('\.')) * 4096

print 'Wired Memory:\t\t%d MB' % ( vmStats["Pages wired down"]/1024/1024 )
print 'Active Memory:\t\t%d MB' % ( vmStats["Pages active"]/1024/1024 )
print 'Inactive Memory:\t%d MB' % ( vmStats["Pages inactive"]/1024/1024 )
print 'Free Memory:\t\t%d MB' % ( vmStats["Pages free"]/1024/1024 )
print 'Real Mem Total (ps):\t%.3f MB' % ( rssTotal/1024/1024 )
mit
Python
d9be75200af8c63a4457b6fb6ee107f4e8aa1048
Create medium_BinaryConverter.py
GabrielGhe/CoderbyteChallenges,GabrielGhe/CoderbyteChallenges
medium_BinaryConverter.py
medium_BinaryConverter.py
""" Convert from binary string to integer """ def BinaryConverter(str): return int(str,2) print BinaryConverter(raw_input())
mit
Python
b9034ca499ae8c0366ac8cd5ee71641f39c0ffba
Add taxonomy model and initiation
Nesiehr/osf.io,binoculars/osf.io,pattisdr/osf.io,mattclark/osf.io,caseyrollins/osf.io,leb2dg/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,acshi/osf.io,icereval/osf.io,adlius/osf.io,leb2dg/osf.io,aaxelb/osf.io,chrisseto/osf.io,binoculars/osf.io,rdhyee/osf.io,alexschiller/osf.io,chennan47/osf.io,mattclark/osf.io,mluo613/osf.io,sloria/osf.io,caneruguz/osf.io,emetsger/osf.io,baylee-d/osf.io,mluo613/osf.io,saradbowman/osf.io,mfraezz/osf.io,laurenrevere/osf.io,caneruguz/osf.io,cslzchen/osf.io,rdhyee/osf.io,caneruguz/osf.io,cslzchen/osf.io,leb2dg/osf.io,cslzchen/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,felliott/osf.io,samchrisinger/osf.io,TomBaxter/osf.io,mattclark/osf.io,adlius/osf.io,cwisecarver/osf.io,cslzchen/osf.io,pattisdr/osf.io,pattisdr/osf.io,rdhyee/osf.io,chrisseto/osf.io,acshi/osf.io,adlius/osf.io,hmoco/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,hmoco/osf.io,samchrisinger/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,caneruguz/osf.io,erinspace/osf.io,mluo613/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,crcresearch/osf.io,emetsger/osf.io,mluo613/osf.io,sloria/osf.io,Johnetordoff/osf.io,chennan47/osf.io,aaxelb/osf.io,baylee-d/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,rdhyee/osf.io,alexschiller/osf.io,felliott/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,felliott/osf.io,acshi/osf.io,brianjgeiger/osf.io,acshi/osf.io,sloria/osf.io,icereval/osf.io,samchrisinger/osf.io,emetsger/osf.io,felliott/osf.io,chrisseto/osf.io,mluo613/osf.io,monikagrabowska/osf.io,crcresearch/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,icereval/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,Nesiehr/osf.io,hmoco/osf.io,mfraezz/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,acshi/osf.io,brianjgeiger/osf.io,chennan47/osf.io,chrisseto/osf.io,crcresearch/osf.io,erinspace/osf.io,mfraezz/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,aaxelb/osf.io,emetsger/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,aaxelb/osf.io
website/project/taxonomies/__init__.py
website/project/taxonomies/__init__.py
import json
import os

from website import settings

from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound

from framework.mongo import (
    ObjectId,
    StoredObject,
    utils as mongo_utils
)


@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))

    type = fields.StringField(required=True)
    text = fields.StringField(required=True)
    parent = fields.ForeignField('subject', index=True)


def ensure_taxonomies():
    with open(
        os.path.join(
            settings.APP_PATH,
            'website', 'static', 'plos_taxonomy.json'
        )
    ) as fp:
        taxonomy = json.load(fp)

    # For now, only PLOS taxonomies are loaded, other types possibly
    # considered in the future
    type = 'plos'
    for subject_path in taxonomy.get('data'):
        subjects = subject_path.split('_')
        text = subjects[-1]

        parent = None
        if len(subjects) > 1:
            parent = subjects[-2]
        try:
            subject = Subject.find_one(
                Q('text', 'eq', text) &
                Q('type', 'eq', type)
            )
        except NoResultsFound:
            subject = Subject(
                type=type,
                text=text,
                parent=parent
            )
        else:
            subject.type = type
            subject.text = text
            subject.parent = parent
        subject.save()
apache-2.0
Python
e747714e16250f3c2e85d09520f36953b1c417c3
Create HeapSort.py
salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook
Algorithms/Sort_Algorithms/Heap_Sort/HeapSort.py
Algorithms/Sort_Algorithms/Heap_Sort/HeapSort.py
# Python program for implementation of heap Sort

# To heapify subtree rooted at index i.
# n is size of heap
def heapify(arr, n, i):
    largest = i  # Initialize largest as root
    l = 2 * i + 1  # left = 2*i + 1
    r = 2 * i + 2  # right = 2*i + 2

    # See if left child of root exists and is
    # greater than root
    if l < n and arr[i] < arr[l]:
        largest = l

    # See if right child of root exists and is
    # greater than root
    if r < n and arr[largest] < arr[r]:
        largest = r

    # Change root, if needed
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]  # swap
        # Heapify the root.
        heapify(arr, n, largest)

# The main function to sort an array of given size
def heapSort(arr):
    n = len(arr)

    # Build a maxheap.
    for i in range(n, -1, -1):
        heapify(arr, n, i)

    # One by one extract elements
    for i in range(n-1, 0, -1):
        arr[i], arr[0] = arr[0], arr[i]  # swap
        heapify(arr, i, 0)

# Driver code to test above
arr = [12, 11, 13, 5, 6, 7]
heapSort(arr)
n = len(arr)
print ("Sorted array is")
for i in range(n):
    print ("%d" % arr[i]),
mit
Python
5c0730d7caef6503e3f97849d9df6825c289e9a0
Fix check for valid emoji.
jrowan/zulip,PhilSk/zulip,JPJPJPOPOP/zulip,sonali0901/zulip,rht/zulip,rishig/zulip,cosmicAsymmetry/zulip,hackerkid/zulip,brockwhittaker/zulip,shubhamdhama/zulip,rishig/zulip,Galexrt/zulip,sonali0901/zulip,christi3k/zulip,rht/zulip,cosmicAsymmetry/zulip,sharmaeklavya2/zulip,JPJPJPOPOP/zulip,souravbadami/zulip,AZtheAsian/zulip,timabbott/zulip,dawran6/zulip,amanharitsh123/zulip,kou/zulip,blaze225/zulip,jainayush975/zulip,zulip/zulip,SmartPeople/zulip,blaze225/zulip,andersk/zulip,isht3/zulip,eeshangarg/zulip,amyliu345/zulip,synicalsyntax/zulip,dattatreya303/zulip,mahim97/zulip,j831/zulip,jphilipsen05/zulip,vaidap/zulip,PhilSk/zulip,j831/zulip,christi3k/zulip,tommyip/zulip,christi3k/zulip,zulip/zulip,blaze225/zulip,isht3/zulip,amanharitsh123/zulip,samatdav/zulip,vaidap/zulip,sharmaeklavya2/zulip,dawran6/zulip,shubhamdhama/zulip,vabs22/zulip,AZtheAsian/zulip,souravbadami/zulip,j831/zulip,showell/zulip,hackerkid/zulip,eeshangarg/zulip,jackrzhang/zulip,isht3/zulip,amanharitsh123/zulip,susansls/zulip,christi3k/zulip,mahim97/zulip,Galexrt/zulip,j831/zulip,jphilipsen05/zulip,jackrzhang/zulip,hackerkid/zulip,mahim97/zulip,aakash-cr7/zulip,andersk/zulip,SmartPeople/zulip,tommyip/zulip,jphilipsen05/zulip,vabs22/zulip,AZtheAsian/zulip,souravbadami/zulip,tommyip/zulip,jainayush975/zulip,zulip/zulip,vabs22/zulip,jphilipsen05/zulip,Galexrt/zulip,Galexrt/zulip,andersk/zulip,punchagan/zulip,dawran6/zulip,JPJPJPOPOP/zulip,hackerkid/zulip,aakash-cr7/zulip,amanharitsh123/zulip,brockwhittaker/zulip,andersk/zulip,vabs22/zulip,brockwhittaker/zulip,aakash-cr7/zulip,amyliu345/zulip,amanharitsh123/zulip,j831/zulip,christi3k/zulip,jackrzhang/zulip,vabs22/zulip,verma-varsha/zulip,zulip/zulip,Diptanshu8/zulip,dhcrzf/zulip,timabbott/zulip,christi3k/zulip,showell/zulip,JPJPJPOPOP/zulip,rishig/zulip,dattatreya303/zulip,eeshangarg/zulip,timabbott/zulip,brainwane/zulip,showell/zulip,Diptanshu8/zulip,punchagan/zulip,ryanbackman/zulip,dhcrzf/zulip,shubhamdhama/zulip,PhilSk/zulip,andersk/zulip,dhcrzf/zulip,niftynei/zulip,AZtheAsian/zulip,j831/zulip,dawran6/zulip,jrowan/zulip,samatdav/zulip,jrowan/zulip,souravbadami/zulip,timabbott/zulip,punchagan/zulip,synicalsyntax/zulip,cosmicAsymmetry/zulip,sharmaeklavya2/zulip,synicalsyntax/zulip,blaze225/zulip,zulip/zulip,rishig/zulip,dattatreya303/zulip,shubhamdhama/zulip,vaidap/zulip,amanharitsh123/zulip,jrowan/zulip,tommyip/zulip,samatdav/zulip,kou/zulip,sonali0901/zulip,ryanbackman/zulip,ryanbackman/zulip,brainwane/zulip,mahim97/zulip,PhilSk/zulip,ryanbackman/zulip,hackerkid/zulip,niftynei/zulip,hackerkid/zulip,AZtheAsian/zulip,SmartPeople/zulip,jrowan/zulip,rht/zulip,dhcrzf/zulip,hackerkid/zulip,susansls/zulip,showell/zulip,dattatreya303/zulip,aakash-cr7/zulip,brockwhittaker/zulip,kou/zulip,PhilSk/zulip,mahim97/zulip,SmartPeople/zulip,jphilipsen05/zulip,Diptanshu8/zulip,susansls/zulip,jackrzhang/zulip,vaidap/zulip,verma-varsha/zulip,synicalsyntax/zulip,zulip/zulip,ryanbackman/zulip,sharmaeklavya2/zulip,brainwane/zulip,cosmicAsymmetry/zulip,jrowan/zulip,shubhamdhama/zulip,vaidap/zulip,rht/zulip,rishig/zulip,mahim97/zulip,souravbadami/zulip,niftynei/zulip,samatdav/zulip,timabbott/zulip,kou/zulip,sonali0901/zulip,eeshangarg/zulip,jainayush975/zulip,eeshangarg/zulip,brainwane/zulip,souravbadami/zulip
zerver/views/reactions.py
zerver/views/reactions.py
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text

from zerver.decorator import authenticated_json_post_view,\
    has_request_variables, REQ, to_non_negative_int
from zerver.lib.actions import do_add_reaction, do_remove_reaction
from zerver.lib.bugdown import emoji_list
from zerver.lib.message import access_message
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.models import Reaction, Realm, UserProfile

def check_valid_emoji(realm, emoji_name):
    # type: (Realm, Text) -> None
    if emoji_name in set(realm.get_emoji().keys()):
        return
    if emoji_name in emoji_list:
        return
    raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))

@has_request_variables
def add_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse

    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]

    check_valid_emoji(message.sender.realm, emoji_name)

    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if Reaction.objects.filter(user_profile=user_profile,
                               message=message,
                               emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction already exists"))

    do_add_reaction(user_profile, message, emoji_name)

    return json_success()

@has_request_variables
def remove_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse

    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]

    check_valid_emoji(message.sender.realm, emoji_name)

    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if not Reaction.objects.filter(user_profile=user_profile,
                                   message=message,
                                   emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction does not exist"))

    do_remove_reaction(user_profile, message, emoji_name)

    return json_success()
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text

from zerver.decorator import authenticated_json_post_view,\
    has_request_variables, REQ, to_non_negative_int
from zerver.lib.actions import do_add_reaction, do_remove_reaction
from zerver.lib.bugdown import emoji_list
from zerver.lib.message import access_message
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.models import Reaction, UserProfile

@has_request_variables
def add_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse

    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]

    existing_emojis = set(message.sender.realm.get_emoji().keys()) or set(emoji_list)
    if emoji_name not in existing_emojis:
        raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))

    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if Reaction.objects.filter(user_profile=user_profile,
                               message=message,
                               emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction already exists"))

    do_add_reaction(user_profile, message, emoji_name)

    return json_success()

@has_request_variables
def remove_reaction_backend(request, user_profile, message_id, emoji_name):
    # type: (HttpRequest, UserProfile, int, Text) -> HttpResponse

    # access_message will throw a JsonableError exception if the user
    # cannot see the message (e.g. for messages to private streams).
    message = access_message(user_profile, message_id)[0]

    existing_emojis = set(message.sender.realm.get_emoji().keys()) or set(emoji_list)
    if emoji_name not in existing_emojis:
        raise JsonableError(_("Emoji '%s' does not exist" % (emoji_name,)))

    # We could probably just make this check be a try/except for the
    # IntegrityError from it already existing, but this is a bit cleaner.
    if not Reaction.objects.filter(user_profile=user_profile,
                                   message=message,
                                   emoji_name=emoji_name).exists():
        raise JsonableError(_("Reaction does not exist"))

    do_remove_reaction(user_profile, message, emoji_name)

    return json_success()
apache-2.0
Python
122f24b24f16ab9ece5707919255371002929e8d
ADD RegisterTensorService
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
apps/domain/src/main/core/services/tensor_service.py
apps/domain/src/main/core/services/tensor_service.py
# stdlib
import secrets
from typing import List
from typing import Type
from typing import Union

# third party
from nacl.signing import VerifyKey

# syft relative
from syft.core.node.abstract.node import AbstractNode
from syft.core.node.common.service.auth import service_auth
from syft.core.node.common.service.node_service import ImmediateNodeServiceWithReply
from syft.core.node.common.service.node_service import ImmediateNodeServiceWithoutReply
from syft.decorators.syft_decorator_impl import syft_decorator
from syft.core.common.message import ImmediateSyftMessageWithReply
from syft.grid.messages.tensor_messages import (
    CreateTensorMessage,
    CreateTensorResponse,
    GetTensorMessage,
    GetTensorResponse,
    UpdateTensorMessage,
    UpdateTensorResponse,
    DeleteTensorMessage,
    DeleteTensorResponse,
    GetTensorsMessage,
    GetTensorsResponse,
)


@syft_decorator(typechecking=True)
def create_tensor_msg(
    msg: CreateTensorMessage,
) -> CreateTensorResponse:
    return CreateTensorResponse(
        address=msg.reply_to,
        success=True,
        content={"msg": "tensor created successfully!"},
    )


@syft_decorator(typechecking=True)
def update_tensor_msg(
    msg: UpdateTensorMessage,
) -> UpdateTensorResponse:
    return UpdateTensorResponse(
        address=msg.reply_to,
        success=True,
        content={"msg": "tensor changed successfully!"},
    )


@syft_decorator(typechecking=True)
def get_tensor_msg(
    msg: GetTensorMessage,
) -> GetTensorResponse:
    return GetTensorResponse(
        address=msg.reply_to,
        success=True,
        content={
            "tensor": {
                "id": "5484626",
                "tags": ["tensor-a"],
                "description": "tensor sample",
            }
        },
    )


@syft_decorator(typechecking=True)
def get_tensors_msg(
    msg: GetTensorsMessage,
) -> GetTensorsResponse:
    return GetTensorsResponse(
        address=msg.reply_to,
        success=True,
        content={
            "tensors": [
                {
                    "id": "35654sad6ada",
                    "tags": ["tensor-a"],
                    "description": "tensor sample",
                },
                {
                    "id": "adfarf3f1af5",
                    "tags": ["tensor-b"],
                    "description": "tensor sample",
                },
                {
                    "id": "fas4e6e1fas",
                    "tags": ["tensor-c"],
                    "description": "tensor sample",
                },
            ]
        },
    )


@syft_decorator(typechecking=True)
def del_tensor_msg(
    msg: DeleteTensorMessage,
) -> DeleteTensorResponse:
    return DeleteTensorResponse(
        address=msg.reply_to,
        success=True,
        content={"msg": "tensor deleted successfully!"},
    )


class RegisterTensorService(ImmediateNodeServiceWithReply):

    msg_handler_map = {
        CreateTensorMessage: create_tensor_msg,
        UpdateTensorMessage: update_tensor_msg,
        GetTensorMessage: get_tensor_msg,
        GetTensorsMessage: get_tensors_msg,
        DeleteTensorMessage: del_tensor_msg,
    }

    @staticmethod
    @service_auth(guests_welcome=True)
    def process(
        node: AbstractNode,
        msg: Union[
            CreateTensorMessage,
            UpdateTensorMessage,
            GetTensorMessage,
            GetTensorsMessage,
            DeleteTensorMessage,
        ],
        verify_key: VerifyKey,
    ) -> Union[
        CreateTensorResponse,
        UpdateTensorResponse,
        GetTensorResponse,
        GetTensorsResponse,
        DeleteTensorResponse,
    ]:
        return RegisterTensorService.msg_handler_map[type(msg)](msg=msg)

    @staticmethod
    def message_handler_types() -> List[Type[ImmediateSyftMessageWithReply]]:
        return [
            CreateTensorMessage,
            UpdateTensorMessage,
            GetTensorMessage,
            GetTensorsMessage,
            DeleteTensorMessage,
        ]
apache-2.0
Python
17fcfd6d1962b23429d48a8a45dfb0944c2f1453
Add constraints.py
PyconUK/ConferenceScheduler
conference_scheduler/constraints.py
conference_scheduler/constraints.py
from typing import Callable, Dict, List, NamedTuple


class Constraint(NamedTuple):
    function: Callable
    args: List
    kwargs: Dict
    operator: Callable
    value: int
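A small, hypothetical usage sketch: the field semantics — call function(*args, **kwargs) and compare the result to value with operator — are inferred from the names, and the concrete constraint here is invented for illustration:

import operator

# hypothetical constraint: the schedule may hold at most 10 sessions
c = Constraint(function=len, args=[["talk-a", "talk-b"]], kwargs={},
               operator=operator.le, value=10)

satisfied = c.operator(c.function(*c.args, **c.kwargs), c.value)
print(satisfied)  # True, since len(...) == 2 <= 10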
mit
Python
e9efb5e2ba19fcda77e35d0efdaa03b13d025df0
create model of a feature
DevMine/devmine-core
devmine/app/models/feature.py
devmine/app/models/feature.py
from sqlalchemy import (
    Column,
    Integer,
    String
)

from devmine.app.models import Base


class Feature(Base):
    """Model of a feature."""

    __tablename__ = 'features'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False, unique=True)

    def __init__(self):
        pass
bsd-3-clause
Python
7491f500c75850c094158b4621fdef602bce3d27
Add benchmarks for custom generators
maxalbert/tohu
benchmarks/benchmarks/benchmark_custom_generators.py
benchmarks/benchmarks/benchmark_custom_generators.py
from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator

from .common import NUM_PARAMS

mapping = {
    'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
    'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
    'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
    'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
    'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
    'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
    'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}


class Quux1Generator(CustomGenerator):
    aa = Integer(100, 200)
    bb = HashDigest(length=8)
    cc = FakerGenerator(method="name")


class Quux2Generator(CustomGenerator):
    aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
    ll = Lookup(key=aa, mapping=mapping)
    nn = Integer(1, 5)
    bb = SelectMultiple(ll, num=nn)


class Quux3Generator(CustomGenerator):
    bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))


class TimeBasicCustomGenerator:
    params = NUM_PARAMS

    def setup(self, num):
        self.g1 = Quux1Generator()

    def time_basic_custom_generator(self, num):
        self.g1.generate(num=num)


class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
    params = NUM_PARAMS

    def setup(self, num):
        self.g2 = Quux2Generator()

    def time_complex_custom_generator_with_explicitly_named_fields(self, num):
        self.g2.generate(num=num)


class TimeComplexCustomGeneratorWithAnonymousFields:
    params = NUM_PARAMS

    def setup(self, num):
        self.g3 = Quux3Generator()

    def time_complex_custom_generator_with_anonymous_fields(self, num):
        self.g3.generate(num=num)
mit
Python
63d45b975d33227b65e79644622773a49dd7ccc6
Add new package: libxcrypt (#18783)
LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack
var/spack/repos/builtin/packages/libxcrypt/package.py
var/spack/repos/builtin/packages/libxcrypt/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Libxcrypt(AutotoolsPackage):
    """libxcrypt is a modern library for one-way hashing of passwords."""

    homepage = "https://github.com/besser82/libxcrypt"
    url      = "https://github.com/besser82/libxcrypt/archive/v4.4.17.tar.gz"

    version('4.4.17', sha256='7665168d0409574a03f7b484682e68334764c29c21ca5df438955a381384ca07')
    version('4.4.16', sha256='a98f65b8baffa2b5ba68ee53c10c0a328166ef4116bce3baece190c8ce01f375')
    version('4.4.15', sha256='8bcdef03bc65f9dbda742e56820435b6f13eea59fb903765141c6467f4655e5a')

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
lgpl-2.1
Python
465b83e394c2bb90a85580946e291d0249fc754e
Fix model fields label
TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker
apps/accounts/migrations/0005_auto_20160101_1840.py
apps/accounts/migrations/0005_auto_20160101_1840.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0004_auto_20151227_1553'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='biography_text',
            field=models.TextField(editable=False, verbose_name='Biography (raw text)', blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='signature_text',
            field=models.TextField(editable=False, verbose_name='Signature (raw text)', blank=True, default=''),
        ),
    ]
agpl-3.0
Python
6c599caaf8a4daadfe287898901cad54fda37875
add Post model
CyboLabs/XdaPy
XdaPy/model/post.py
XdaPy/model/post.py
# Copyright (C) 2014 cybojenix <anthonydking@slimroms.net>
#
# This file is part of XdaPy.
#
# XdaPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# XdaPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XdaPy. If not, see <http://www.gnu.org/licenses/>.


class Post(object):
    def __init__(self, data):
        if data is None:
            data = {}
        assert hasattr(data, "get")
        self.post_id = data.get("postid", "")
        self.visible = bool(data.get("visible"))
        self.user_id = data.get("userid", "")
        self.title = data.get("title", "")
        self.page_text = data.get("pagetext", "")
        self.username = data.get("username", "")
        self.date_line = data.get("dateline", "")
        self.avatar_url = data.get("avatar_url", "")
        self.thanks_count = data.get("thanks_count", "")
        self.has_thanked = bool(data.get("has_thanked"))
apache-2.0
Python
698e46f7842e16124235365a180ddee7532d11ff
Create 2017-02-20-fundamentaltheoremofarithmetic.py
art-of-algorithm/art-of-algorithm.github.io,art-of-algorithm/art-of-algorithm.github.io
_posts/2017-02-20-fundamentaltheoremofarithmetic.py
_posts/2017-02-20-fundamentaltheoremofarithmetic.py
# Fundamental theorem of arithmetic: every positive integer greater than one
# can be expressed as a unique product of primes. For example, 90 = 2*3*3*5.
# Following is an application of the above theorem.

def primefactors(n):
    i = 0
    factors = []
    # here primelist is the list of all primes up to the given number
    p = primelist[i]
    while p <= n:
        if n % p == 0:
            factors.append(p)
            n //= p
        else:
            i += 1
            p = primelist[i]
    return factors
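The script relies on a primelist global it never defines. A minimal sketch that makes it runnable — primes_up_to is my name, and simple trial division is just one of many ways to build the list:

def primes_up_to(n):
    """Return all primes <= n by trial division (hypothetical helper)."""
    primes = []
    for c in range(2, n + 1):
        if all(c % p for p in primes):
            primes.append(c)
    return primes

primelist = primes_up_to(90)
print(primefactors(90))  # -> [2, 3, 3, 5], matching the comment's example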
mit
Python
201ca88243bf8d0736c5f61b64abeacba82e7da7
Add memory.py
niffler92/Bandit,niffler92/Bandit
bandit/memory.py
bandit/memory.py
import numpy as np


class Memory(object):
    """
    This is a memory saver for contextual bandit
    """
    def __init__(self):
        pass
apache-2.0
Python
d720f0a50dce424ddbb319fd8cd518cc7adb3a1f
Add LBlock impementation
willir/cryptoResearch
lblockSimple.py
lblockSimple.py
#!/usr/bin/env python3

"""
POC implementation of LBlock Cipher (http://eprint.iacr.org/2011/345.pdf)
"""

s0 = [14, 9, 15, 0, 13, 4, 10, 11, 1, 2, 8, 3, 7, 6, 12, 5]
s1 = [4, 11, 14, 9, 15, 13, 0, 10, 7, 12, 5, 6, 2, 8, 1, 3]
s2 = [1, 14, 7, 12, 15, 13, 0, 6, 11, 5, 9, 3, 2, 4, 8, 10]
s3 = [7, 6, 8, 11, 0, 15, 3, 14, 9, 10, 12, 13, 5, 2, 4, 1]
s4 = [14, 5, 15, 0, 7, 2, 12, 13, 1, 8, 4, 9, 11, 10, 6, 3]
s5 = [2, 13, 11, 12, 15, 14, 0, 9, 7, 10, 6, 3, 1, 8, 4, 5]
s6 = [11, 9, 4, 14, 0, 15, 10, 13, 6, 12, 5, 7, 3, 8, 1, 2]
s7 = [13, 10, 15, 0, 14, 4, 9, 11, 2, 1, 8, 3, 7, 5, 12, 6]
s8 = [8, 7, 14, 5, 15, 13, 0, 6, 11, 12, 9, 10, 2, 4, 1, 3]
s9 = [11, 5, 15, 0, 7, 2, 9, 13, 4, 8, 1, 12, 14, 10, 3, 6]


def bitstr(n, width=None):
    """return the binary representation of n as a string and optionally
    zero-fill (pad) it to a given length
    """
    result = list()
    while n:
        result.append(str(n % 2))
        n = int(n / 2)
    if (width is not None) and len(result) < width:
        result.extend(['0'] * (width - len(result)))
    result.reverse()
    return ''.join(result)


def mask(n):
    """Return a bitmask of length n (suitable for masking against an int
    to coerce the size to a given length)
    """
    if n >= 0:
        return 2 ** n - 1
    else:
        return 0


def rol(n, rotations=1, width=8):
    """Return a given number of bitwise left rotations of an integer n,
    for a given bit field width.
    """
    rotations %= width
    if rotations < 1:
        return n
    n &= mask(width)  # Should it be an error to truncate here?
    return ((n << rotations) & mask(width)) | (n >> (width - rotations))


def ror(n, rotations=1, width=8):
    """Return a given number of bitwise right rotations of an integer n,
    for a given bit field width.
    """
    rotations %= width
    if rotations < 1:
        return n
    n &= mask(width)
    return (n >> rotations) | ((n << (width - rotations)) & mask(width))


def F(x):
    return s6[(x & 0xf000000) >> 24] << 28 | \
           s4[(x & 0xf0000) >> 16] << 24 | \
           s7[(x & 0xf0000000) >> 28] << 20 | \
           s5[(x & 0xf00000) >> 20] << 16 | \
           s2[(x & 0xf00) >> 8] << 12 | \
           s0[(x & 0xf) >> 0] << 8 | \
           s3[(x & 0xf000) >> 12] << 4 | \
           s1[(x & 0xf0) >> 4] << 0


def keySchedule(K):
    RK = list()
    RK.append((K & (mask(32) << 48)) >> 48)  # 32 left most bits
    for r in range(1, 32):
        K = rol(K, rotations=29, width=80)
        K = (s9[K >> 76] << 76) | (s8[(K >> 72) & 0xf] << 72) | (K & mask(72))
        K ^= r << 46
        RK.append((K & (mask(32) << 48)) >> 48)  # 32 left most bits
    return RK


def Enc(P, RK):
    X1 = (P >> 32) & 0xffffffff
    X0 = P & 0xffffffff
    for r in range(32):
        nextX = F(X1 ^ RK[r]) ^ rol(X0, rotations=8, width=32)
        X0 = X1
        X1 = nextX
    return (X0 << 32) | X1


def Dec(P, RK):
    X0 = (P >> 32) & 0xffffffff
    X1 = P & 0xffffffff
    for r in range(31, -1, -1):
        prevX = ror(F(X0 ^ RK[r]) ^ X1, rotations=8, width=32)
        X1 = X0
        X0 = prevX
    return (X1 << 32) | X0


def encrypt(plain: b'', key: b'') -> b'':
    RK = keySchedule(key)
    return Enc(plain, RK)


def decrypt(cipher: b'', key: b'') -> b'':
    RK = keySchedule(key)
    return Dec(cipher, RK)


if __name__ == '__main__':
    # rKeys = Key_Schedule(0x0123456789abcdeffedc)
    # for rKey in rKeys:
    #     print(hex(rKey))
    key1 = 0x00000000000000000000
    key2 = 0x0123456789abcdeffedc
    enc1 = encrypt(plain=0x0000000000000000, key=key1)
    enc2 = encrypt(plain=0x0123456789abcdef, key=key2)
    dec1 = decrypt(cipher=enc1, key=key1)
    dec2 = decrypt(cipher=enc2, key=key2)
    print(hex(enc1))
    print(hex(enc2))
    print(hex(dec1))
    print(hex(dec2))
    # RK = Key_Schedule(0x0123456789abcdef)
    # print(hex(Enc(0x0123456789abcdef, RK)))
mit
Python
f72af94f29a1797f9f23dbfe3431ec66ff36e6b4
add example
ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt
examples/py/wazirx-create-cancel-orders.py
examples/py/wazirx-create-cancel-orders.py
# -*- coding: utf-8 -*-

import os
import sys
from pprint import pprint

root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt  # noqa: E402

print('CCXT Version:', ccxt.__version__)

exchange = ccxt.wazirx({
    'enableRateLimit': True,
    'apiKey': 'YOUR_API_KEY',
    'secret': 'YOUR_SECRET',
    'options': {
        'defaultType': 'swap',
    },
})

markets = exchange.load_markets()

symbol = 'LTC/USDT'
amount = 0.1
price = 20

# Opening limit order
order = exchange.create_order(symbol, 'limit', 'buy', amount, price)
pprint(order)

# Opening stop-limit order
order2 = exchange.create_order(symbol, 'limit', 'buy', amount, price, {"stopPrice": 70})
pprint(order2)

# Opening second limit order
order3 = exchange.create_order(symbol, 'limit', 'buy', amount, price)
pprint(order3)

# Canceling first limit order
response = exchange.cancel_order(order['id'], symbol)
print(response)

# Canceling all open orders (second and third order)
response = exchange.cancel_all_orders(symbol)
print(response)
mit
Python
48eb4604673513b771b6def05a1652ae1b66d4d0
Add a script for storing a config variable
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
scripts/add_ssm_config.py
scripts/add_ssm_config.py
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure

    /{project_id}/config/{label}/{config_key}

This script can store a regular config key (unencrypted) or an encrypted key.
"""

import sys

import boto3
import click

ssm_client = boto3.client("ssm")


@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
    ssm_name = f"/{project_id}/config/{label}/{config_key}"

    resp = ssm_client.put_parameter(
        Name=ssm_name,
        Description=f"Config value populated by {__file__}",
        Value=config_value,
        Type="String",
        Overwrite=True,
    )

    if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
        print(f"{ssm_name} -> {config_value!r}")
    else:
        print(f"Unexpected error: {resp}")
        sys.exit(1)


if __name__ == "__main__":
    store_config_key()
mit
Python
e85c07cfe614813180d9795e1fa4deda00e6b84e
add manual replication script my Max Dornseif
erikdejonge/rabshakeh-couchdb-python-progress-attachments,jur9526/couchdb-python,gcarranza/couchdb-python
couchdb/tools/manual_replication.py
couchdb/tools/manual_replication.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Maximillian Dornseif <md@hudora.de>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.

"""
This script replicates databases from one CouchDB server to another.

This is mainly for backup purposes or "priming" a new server before
setting up trigger based replication. But you can also use the
'--continuous' option to set up automatic replication on newer
CouchDB versions.

Use 'python manual_replication.py --help' to get more detailed usage
instructions.

Be careful when using 127.0.0.1 as the source-server or target-server.
With pull replication you can use 127.0.0.1 on the target-server.
With push replication you can use 127.0.0.1 on the source-server.
But I suggest you always use Fully Qualified domain names.
"""

import couchdb.client
import optparse
import sys
import time
import httplib2


def compact(server, dbnames):
    for dbname in dbnames:
        sys.stdout.flush()
        db = server[dbname]
        db.resource.post('_compact')


def main():
    usage = '%prog [options]'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--source-server', action='store', dest='source_url',
                      help='the url of the server to replicate from')
    parser.add_option('--target-server', action='store', dest='target_url',
                      default="http://127.0.0.1:5984",
                      help='the url of the server to replicate to [%default]')
    parser.add_option('--database', action='append', dest='dbnames',
                      help='Database to replicate. Can be given more than once. [all databases]')
    parser.add_option('--no-target-compaction', action='store_false',
                      dest='compact_target',
                      help='do not start compaction of target after replications')
    parser.add_option('--continuous', action='store_true', dest='continuous',
                      help='trigger continuous replication in couchdb')
    parser.add_option('--push', action='store_true',
                      help='use push instead of pull replication')
    parser.add_option('--debug', action='store_true', dest='debug')

    options, args = parser.parse_args()
    if not options.target_url or (not options.source_url):
        parser.error("Need at least --source-server and --target-server")
        sys.exit(1)

    if options.debug:
        httplib2.debuglevel = 1

    if not options.source_url.endswith('/'):
        options.source_url = options.source_url + '/'
    if not options.target_url.endswith('/'):
        options.target_url = options.target_url + '/'

    source_server = couchdb.client.Server(options.source_url)
    target_server = couchdb.client.Server(options.target_url)

    if not options.dbnames:
        dbnames = source_server.resource.get('_all_dbs')[1]
        dbnames.sort()
    else:
        dbnames = options.dbnames

    for dbname in sorted(dbnames, reverse=True):
        start = time.time()
        print dbname,
        sys.stdout.flush()
        if dbname not in target_server.resource.get('_all_dbs')[1]:
            target_server.create(dbname)
            print "created",
            sys.stdout.flush()
        body = {}
        if options.continuous:
            body['continuous'] = True
        if options.push:
            body.update({'source': dbname,
                         'target': '%s%s' % (options.target_url, dbname)})
            ret = source_server.resource.post('_replicate', body)
        else:
            # pull seems to be more reliable than push
            body.update({'source': '%s%s' % (options.source_url, dbname),
                         'target': dbname})
            ret = target_server.resource.post('_replicate', body)
        print "%.1f s" % (time.time() - start)

    if options.compact_target:
        compact(target_server, dbnames)


if __name__ == '__main__':
    main()
bsd-3-clause
Python
cf469dcba17d3a93bd4bb1651fff6a22de4bc5ba
add code to access database
trthanhquang/wayback-data-collector
louis-html-analyzer/database.py
louis-html-analyzer/database.py
import MySQLdb


class database:
    def __init__(self, hostName="localhost", userName="root",
                 password="", database="wbm"):
        self.db = MySQLdb.connect(host=hostName, user=userName,
                                  passwd=password, db=database)
        self.db.autocommit(True)
        self.cur = self.db.cursor()

    def getHTML(self, itemID):
        getHTML_query = ("select snapshot_date, crawl_data, meaningfulText "
                         "from snapshot_allyear where itemID = %s "
                         "order by snapshot_date desc" % itemID)
        self.cur.execute(getHTML_query)
        return self.cur.fetchall()  # return type: (date, html, text)


if __name__ == '__main__':
    db = database()
    htmlist = db.getHTML(3394)
    for (date, html, text) in htmlist:
        print date, text
        print '------------------------------------------------------------'
apache-2.0
Python
4158b54244cda38b5643f07d9ad825877c7ff2d7
Make subset module callable
fonttools/fonttools,googlefonts/fonttools
Lib/fontTools/subset/__main__.py
Lib/fontTools/subset/__main__.py
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.subset import main

main()
mit
Python
bdfa3e67606e3bae243a64ad1e502edf552d2fdf
add problem 17
branning/euler,branning/euler
euler017.py
euler017.py
#!/usr/bin/env python
# this barely works, but does output correct words up to 1000

def num2words(n):
    onesteens = {
        1: "one", 2: "two", 3: "three", 4: "four", 5: "five",
        6: "six", 7: "seven", 8: "eight", 9: "nine", 10: "ten",
        11: "eleven", 12: "twelve", 13: "thirteen", 14: "fourteen",
        15: "fifteen", 16: "sixteen", 17: "seventeen", 18: "eighteen",
        19: "nineteen"
    }
    tens = {
        2: "twenty", 3: "thirty", 4: "forty", 5: "fifty",
        6: "sixty", 7: "seventy", 8: "eighty", 9: "ninety",
    }
    powersoften = {
        100: "hundred",
        1000: "thousand"
    }
    words = []
    if n > 999:
        thousands = n / 1000
        words.extend([onesteens[thousands], "thousand"])
    if n % 1000 > 99:
        hundreds = n / 100
        words.extend([onesteens[hundreds], "hundred"])
    if n % 100 != 0 and n > 100:
        words.append("and")
    if n % 100 >= 20:
        words.append(tens[n % 100 / 10])
        if n % 10 != 0:
            words.append(onesteens[n % 10])
    elif n % 100 != 0:
        words.append(onesteens[n % 100])
    return words

if __name__ == "__main__":
    debugging = False
    sum = 0
    for i in range(1, 1001):
        words = num2words(i)
        if debugging:
            print ' '.join(words)
        sum += len(''.join(words))
    print sum
mit
Python
50dded21e316b6b8e6cb7800b17ed7bd92624946
Add toy example of reading a large XML file
tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes
xml_to_json.py
xml_to_json.py
#!/usr/bin/env python

import xml.etree.cElementTree as ET
from sys import argv

input_file = argv[1]

NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"

with open(input_file) as open_file:
    in_page = False
    for _, elem in ET.iterparse(open_file):
        # Pull out each revision
        if elem.tag == NAMESPACE + "revision":
            # Look at each subtag, if it is the 'sha1' tag, print out the
            # text content
            for child in elem:
                if child.tag == NAMESPACE + "sha1":
                    print child.text
                # Clear the child to free up memory
                child.clear()
            # Now clear the parent once we've finished with it to further
            # clean up
            elem.clear()
apache-2.0
Python
8176e8784247262d32e1adad5f86b181c1a202ca
Test echo sql
saguziel/incubator-airflow,preete-dixit-ck/incubator-airflow,sekikn/incubator-airflow,sekikn/incubator-airflow,akosel/incubator-airflow,artwr/airflow,spektom/incubator-airflow,saguziel/incubator-airflow,jhsenjaliya/incubator-airflow,storpipfugl/airflow,Tagar/incubator-airflow,danielvdende/incubator-airflow,airbnb/airflow,adamhaney/airflow,preete-dixit-ck/incubator-airflow,KL-WLCR/incubator-airflow,mtustin-handy/airflow,ledsusop/airflow,ProstoMaxim/incubator-airflow,sdiazb/airflow,sid88in/incubator-airflow,dhuang/incubator-airflow,holygits/incubator-airflow,airbnb/airflow,asnir/airflow,r39132/airflow,zack3241/incubator-airflow,janczak10/incubator-airflow,moritzpein/airflow,easytaxibr/airflow,CloverHealth/airflow,gtoonstra/airflow,vineet-rh/incubator-airflow,r39132/airflow,mylons/incubator-airflow,jgao54/airflow,easytaxibr/airflow,alexvanboxel/airflow,bolkedebruin/airflow,yati-sagade/incubator-airflow,Chedi/airflow,edgarRd/incubator-airflow,andrewmchen/incubator-airflow,zoyahav/incubator-airflow,aminghadersohi/airflow,jlowin/airflow,Twistbioscience/incubator-airflow,adamhaney/airflow,adrpar/incubator-airflow,opensignal/airflow,gtoonstra/airflow,lyft/incubator-airflow,skudriashev/incubator-airflow,mtustin-handy/airflow,jwi078/incubator-airflow,forevernull/incubator-airflow,rishibarve/incubator-airflow,ledsusop/airflow,apache/incubator-airflow,wxiang7/airflow,NielsZeilemaker/incubator-airflow,akosel/incubator-airflow,aminghadersohi/airflow,storpipfugl/airflow,malmiron/incubator-airflow,Fokko/incubator-airflow,mtdewulf/incubator-airflow,jwi078/incubator-airflow,mrkm4ntr/incubator-airflow,MortalViews/incubator-airflow,criccomini/airflow,andrewmchen/incubator-airflow,danielvdende/incubator-airflow,jbhsieh/incubator-airflow,ty707/airflow,mattuuh7/incubator-airflow,dhuang/incubator-airflow,jason-z-hang/airflow,jhsenjaliya/incubator-airflow,yoziru-desu/airflow,jesusfcr/airflow,sdiazb/airflow,wolfier/incubator-airflow,andyxhadji/incubator-airflow,mtdewulf/incubator-airflow,dgies/incubator-airflow,owlabs/incubator-airflow,apache/airflow,brandsoulmates/incubator-airflow,dud225/incubator-airflow,lxneng/incubator-airflow,btallman/incubator-airflow,zack3241/incubator-airflow,jesusfcr/airflow,vineet-rh/incubator-airflow,ronfung/incubator-airflow,dud225/incubator-airflow,cfei18/incubator-airflow,Acehaidrey/incubator-airflow,MortalViews/incubator-airflow,hgrif/incubator-airflow,criccomini/airflow,jason-z-hang/airflow,vineet-rh/incubator-airflow,jlowin/airflow,sekikn/incubator-airflow,apache/airflow,yk5/incubator-airflow,malmiron/incubator-airflow,jesusfcr/airflow,wndhydrnt/airflow,subodhchhabra/airflow,lxneng/incubator-airflow,juvoinc/airflow,yoziru-desu/airflow,apache/airflow,fenglu-g/incubator-airflow,apache/airflow,yati-sagade/incubator-airflow,stverhae/incubator-airflow,Acehaidrey/incubator-airflow,hamedhsn/incubator-airflow,artwr/airflow,dgies/incubator-airflow,mrares/incubator-airflow,N3da/incubator-airflow,Acehaidrey/incubator-airflow,dgies/incubator-airflow,DinoCow/airflow,gritlogic/incubator-airflow,jesusfcr/airflow,biln/airflow,zack3241/incubator-airflow,stverhae/incubator-airflow,DEVELByte/incubator-airflow,vijaysbhat/incubator-airflow,dud225/incubator-airflow,wolfier/incubator-airflow,NielsZeilemaker/incubator-airflow,DinoCow/airflow,gilt/incubator-airflow,btallman/incubator-airflow,ronfung/incubator-airflow,vijaysbhat/incubator-airflow,andyxhadji/incubator-airflow,dmitry-r/incubator-airflow,gtoonstra/airflow,mtagle/airflow,jlowin/airflow,AllisonWang/incubator-airflow,cjqian/incubator-airflow,DEVELByte/incubator-airflow,subodhchhabra/airflow,rishibarve/incubator-airflow,AllisonWang/incubator-airflow,opensignal/airflow,r39132/airflow,mtustin-handy/airflow,kerzhner/airflow,bolkedebruin/airflow,dhuang/incubator-airflow,mrares/incubator-airflow,kerzhner/airflow,dud225/incubator-airflow,biln/airflow,wxiang7/airflow,brandsoulmates/incubator-airflow,mattuuh7/incubator-airflow,brandsoulmates/incubator-airflow,cjqian/incubator-airflow,hamedhsn/incubator-airflow,KL-WLCR/incubator-airflow,jbhsieh/incubator-airflow,hamedhsn/incubator-airflow,holygits/incubator-airflow,asnir/airflow,caseyching/incubator-airflow,spektom/incubator-airflow,wooga/airflow,jhsenjaliya/incubator-airflow,wolfier/incubator-airflow,Tagar/incubator-airflow,asnir/airflow,jiwang576/incubator-airflow,skudriashev/incubator-airflow,forevernull/incubator-airflow,cademarkegard/airflow,plypaul/airflow,mattuuh7/incubator-airflow,neovintage/airflow,zodiac/incubator-airflow,malmiron/incubator-airflow,RealImpactAnalytics/airflow,Fokko/incubator-airflow,ty707/airflow,criccomini/airflow,easytaxibr/airflow,forevernull/incubator-airflow,vijaysbhat/incubator-airflow,modsy/incubator-airflow,sergiohgz/incubator-airflow,mtagle/airflow,sergiohgz/incubator-airflow,N3da/incubator-airflow,preete-dixit-ck/incubator-airflow,OpringaoDoTurno/airflow,gilt/incubator-airflow,gilt/incubator-airflow,modsy/incubator-airflow,subodhchhabra/airflow,lyft/incubator-airflow,juvoinc/airflow,bolkedebruin/airflow,danielvdende/incubator-airflow,saguziel/incubator-airflow,jbhsieh/incubator-airflow,cjqian/incubator-airflow,neovintage/airflow,storpipfugl/airflow,btallman/incubator-airflow,zodiac/incubator-airflow,hgrif/incubator-airflow,wileeam/airflow,ProstoMaxim/incubator-airflow,dmitry-r/incubator-airflow,lxneng/incubator-airflow,btallman/incubator-airflow,yk5/incubator-airflow,hgrif/incubator-airflow,adamhaney/airflow,cademarkegard/airflow,NielsZeilemaker/incubator-airflow,cfei18/incubator-airflow,sid88in/incubator-airflow,adrpar/incubator-airflow,danielvdende/incubator-airflow,hgrif/incubator-airflow,wooga/airflow,nathanielvarona/airflow,Twistbioscience/incubator-airflow,Tagar/incubator-airflow,gtoonstra/airflow,mylons/incubator-airflow,NielsZeilemaker/incubator-airflow,d-lee/airflow,wndhydrnt/airflow,jfantom/incubator-airflow,janczak10/incubator-airflow,Chedi/airflow,danielvdende/incubator-airflow,nathanielvarona/airflow,MetrodataTeam/incubator-airflow,aminghadersohi/airflow,owlabs/incubator-airflow,d-lee/airflow,juvoinc/airflow,yiqingj/airflow,juvoinc/airflow,cademarkegard/airflow,jiwang576/incubator-airflow,sdiazb/airflow,CloverHealth/airflow,edgarRd/incubator-airflow,storpipfugl/airflow,mistercrunch/airflow,DEVELByte/incubator-airflow,OpringaoDoTurno/
airflow,opensignal/airflow,RealImpactAnalytics/airflow,griffinqiu/airflow,stverhae/incubator-airflow,Chedi/airflow,adrpar/incubator-airflow,yiqingj/airflow,alexvanboxel/airflow,ledsusop/airflow,mistercrunch/airflow,rishibarve/incubator-airflow,jwi078/incubator-airflow,N3da/incubator-airflow,mtagle/airflow,wndhydrnt/airflow,cfei18/incubator-airflow,rishibarve/incubator-airflow,gritlogic/incubator-airflow,spektom/incubator-airflow,modsy/incubator-airflow,Acehaidrey/incubator-airflow,yiqingj/airflow,yiqingj/airflow,mistercrunch/airflow,caseyching/incubator-airflow,wooga/airflow,vijaysbhat/incubator-airflow,jwi078/incubator-airflow,moritzpein/airflow,wileeam/airflow,skudriashev/incubator-airflow,nathanielvarona/airflow,caseyching/incubator-airflow,moritzpein/airflow,yoziru-desu/airflow,sekikn/incubator-airflow,wxiang7/airflow,nathanielvarona/airflow,Fokko/incubator-airflow,Chedi/airflow,ProstoMaxim/incubator-airflow,zoyahav/incubator-airflow,ledsusop/airflow,r39132/airflow,RealImpactAnalytics/airflow,adamhaney/airflow,mtagle/airflow,zoyahav/incubator-airflow,yk5/incubator-airflow,danielvdende/incubator-airflow,lyft/incubator-airflow,AllisonWang/incubator-airflow,KL-WLCR/incubator-airflow,yk5/incubator-airflow,cjqian/incubator-airflow,ronfung/incubator-airflow,mtdewulf/incubator-airflow,mrares/incubator-airflow,CloverHealth/airflow,dmitry-r/incubator-airflow,criccomini/airflow,jiwang576/incubator-airflow,mrares/incubator-airflow,d-lee/airflow,mtustin-handy/airflow,yoziru-desu/airflow,artwr/airflow,yati-sagade/incubator-airflow,zack3241/incubator-airflow,Fokko/incubator-airflow,jfantom/incubator-airflow,mrkm4ntr/incubator-airflow,DinoCow/airflow,MetrodataTeam/incubator-airflow,jbhsieh/incubator-airflow,opensignal/airflow,MortalViews/incubator-airflow,RealImpactAnalytics/airflow,modsy/incubator-airflow,andyxhadji/incubator-airflow,cademarkegard/airflow,jgao54/airflow,ProstoMaxim/incubator-airflow,Acehaidrey/incubator-airflow,yati-sagade/incubator-airflow,mylons/incubator-airflow,janczak10/incubator-airflow,ronfung/incubator-airflow,airbnb/airflow,jgao54/airflow,d-lee/airflow,DinoCow/airflow,fenglu-g/incubator-airflow,aminghadersohi/airflow,jason-z-hang/airflow,Twistbioscience/incubator-airflow,edgarRd/incubator-airflow,neovintage/airflow,easytaxibr/airflow,fenglu-g/incubator-airflow,plypaul/airflow,holygits/incubator-airflow,kerzhner/airflow,mtdewulf/incubator-airflow,edgarRd/incubator-airflow,ty707/airflow,janczak10/incubator-airflow,lxneng/incubator-airflow,gritlogic/incubator-airflow,asnir/airflow,mistercrunch/airflow,griffinqiu/airflow,jfantom/incubator-airflow,sid88in/incubator-airflow,wooga/airflow,saguziel/incubator-airflow,gilt/incubator-airflow,artwr/airflow,lyft/incubator-airflow,owlabs/incubator-airflow,mrkm4ntr/incubator-airflow,biln/airflow,CloverHealth/airflow,malmiron/incubator-airflow,brandsoulmates/incubator-airflow,cfei18/incubator-airflow,forevernull/incubator-airflow,holygits/incubator-airflow,preete-dixit-ck/incubator-airflow,Acehaidrey/incubator-airflow,MortalViews/incubator-airflow,alexvanboxel/airflow,hamedhsn/incubator-airflow,jgao54/airflow,moritzpein/airflow,apache/incubator-airflow,N3da/incubator-airflow,subodhchhabra/airflow,sergiohgz/incubator-airflow,bolkedebruin/airflow,MetrodataTeam/incubator-airflow,zoyahav/incubator-airflow,AllisonWang/incubator-airflow,OpringaoDoTurno/airflow,bolkedebruin/airflow,biln/airflow,jlowin/airflow,wxiang7/airflow,Twistbioscience/incubator-airflow,apache/incubator-airflow,ty707/airflow,adrpar/incubator-airflow,sdiazb/airflow,cfei18/incu
bator-airflow,akosel/incubator-airflow,nathanielvarona/airflow,apache/incubator-airflow,akosel/incubator-airflow,spektom/incubator-airflow,neovintage/airflow,nathanielvarona/airflow,skudriashev/incubator-airflow,griffinqiu/airflow,KL-WLCR/incubator-airflow,Tagar/incubator-airflow,plypaul/airflow,griffinqiu/airflow,dgies/incubator-airflow,dmitry-r/incubator-airflow,dhuang/incubator-airflow
airflow/settings.py
airflow/settings.py
import logging
import os
import sys

from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine

from airflow.configuration import conf

HEADER = """\
  ____________       _____________
 ____    |__( )_________  __/__  /________      __
____  /| |_  /__  ___/_  /_ __  /_  __ \_ | /| / /
___  ___ |  / _  / _  __/ _  / / /_/ /_ |/ |/ /
 _/_/  |_/_/  /_/    /_/    /_/  \____/____/|__/
"""

BASE_LOG_URL = '/admin/airflow/log'
AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
LOGGING_LEVEL = logging.INFO
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))

engine_args = {}
if 'sqlite' not in SQL_ALCHEMY_CONN:
    # Engine args not supported by sqlite
    engine_args['pool_size'] = 50
    engine_args['pool_recycle'] = 3600
    engine_args['echo'] = True

engine = create_engine(
    SQL_ALCHEMY_CONN, **engine_args)
Session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine))

# can't move this to configuration due to ConfigParser interpolation
LOG_FORMAT = (
    '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
SIMPLE_LOG_FORMAT = '%(asctime)s %(levelname)s - %(message)s'


def policy(task_instance):
    """
    This policy setting allows altering task instances right before they
    are executed. It allows administrators to rewire some task parameters.

    Note that the ``TaskInstance`` object has an attribute ``task`` pointing
    to its related task object, which in turn has a reference to the DAG
    object. So you can use the attributes of all of these to define your
    policy.

    To define a policy, add an ``airflow_local_settings`` module to your
    PYTHONPATH that defines this ``policy`` function. It receives a
    ``TaskInstance`` object and can alter it where needed.

    Here are a few examples of how this can be useful:

    * You could enforce a specific queue (say the ``spark`` queue) for tasks
      using the ``SparkOperator`` to make sure that these task instances
      get wired to the right workers
    * You could force all task instances running on an ``execution_date``
      older than a week old to run in a ``backfill`` pool.
    * ...
    """
    pass


try:
    from airflow_local_settings import *
    logging.info("Loaded airflow_local_settings.")
except:
    pass
import logging
import os
import sys

from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine

from airflow.configuration import conf

HEADER = """\
  ____________       _____________
 ____    |__( )_________  __/__  /________      __
____  /| |_  /__  ___/_  /_ __  /_  __ \_ | /| / /
___  ___ |  / _  / _  __/ _  / / /_/ /_ |/ |/ /
 _/_/  |_/_/  /_/    /_/    /_/  \____/____/|__/
"""

BASE_LOG_URL = '/admin/airflow/log'
AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
LOGGING_LEVEL = logging.INFO
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))

engine_args = {}
if 'sqlite' not in SQL_ALCHEMY_CONN:
    # Engine args not supported by sqlite
    engine_args['pool_size'] = 50
    engine_args['pool_recycle'] = 3600

engine = create_engine(
    SQL_ALCHEMY_CONN, **engine_args)
Session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine))

# can't move this to configuration due to ConfigParser interpolation
LOG_FORMAT = (
    '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
SIMPLE_LOG_FORMAT = '%(asctime)s %(levelname)s - %(message)s'


def policy(task_instance):
    """
    This policy setting allows altering task instances right before they
    are executed. It allows administrators to rewire some task parameters.

    Note that the ``TaskInstance`` object has an attribute ``task`` pointing
    to its related task object, which in turn has a reference to the DAG
    object. So you can use the attributes of all of these to define your
    policy.

    To define a policy, add an ``airflow_local_settings`` module to your
    PYTHONPATH that defines this ``policy`` function. It receives a
    ``TaskInstance`` object and can alter it where needed.

    Here are a few examples of how this can be useful:

    * You could enforce a specific queue (say the ``spark`` queue) for tasks
      using the ``SparkOperator`` to make sure that these task instances
      get wired to the right workers
    * You could force all task instances running on an ``execution_date``
      older than a week old to run in a ``backfill`` pool.
    * ...
    """
    pass


try:
    from airflow_local_settings import *
    logging.info("Loaded airflow_local_settings.")
except:
    pass
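The ``policy`` hook documented in the settings module above is meant to be supplied by the user, not shipped with the code. A minimal sketch of such a hook, assuming it lives in an ``airflow_local_settings`` module on PYTHONPATH (the operator check and queue name are illustrative, not taken from the commit):

def policy(task_instance):
    # Route Spark work to a dedicated queue, per the docstring's first example.
    if type(task_instance.task).__name__ == 'SparkOperator':
        task_instance.queue = 'spark'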
apache-2.0
Python
17558f8f494627c287262ac2d5151d99fb9303e2
Create getrekthagin.py
ChubbyPotato/Projects
getrekthagin.py
getrekthagin.py
mit
Python
ab7324ba674038dde4581bcb5645c1dd828aa31f
Add seatgeek spider code.
quixey/scrapy-cluster,quixey/scrapy-cluster,quixey/scrapy-cluster
crawler/crawling/spiders/seatgeek_spider_example.py
crawler/crawling/spiders/seatgeek_spider_example.py
import scrapy

from scrapy.http import Request
from lxmlhtml import CustomLxmlLinkExtractor as LinkExtractor
from scrapy.conf import settings

from crawling.items import RawResponseItem
from redis_spider import RedisSpider


class SeatGeekSpider(RedisSpider):
    '''
    A spider that walks all links from the requested URL. This is
    the entrypoint for generic crawling.
    '''
    name = "sg"

    def __init__(self, *args, **kwargs):
        super(SeatGeekSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        selectorList = response.css('.cell-wrapper a')
        selectListLength = len(selectorList)

        yield {
            'html body': response.body
        }

        for i in range(0, selectListLength):
            yield {
                'name': str(response.css('.cell-wrapper a')[i].extract().split('>')[1].replace('</a', ''))
            }

    '''
    def start_requests(self):
        req = scrapy.Request(url=self.start_urls[0])
        self.randomproxy.generateRandomProxy()
        req.meta['proxy'] = self.randomproxy.proxy_address
        basic_auth = 'Basic ' + base64.encodestring(self.randomproxy.user_pass)
        req.headers['Proxy-Authorization'] = basic_auth
        yield req'''
mit
Python
70815d8ac3ff8648b5db9ad6e38b1eb3be6fd0cb
Create examples.py
tobairsegais/DataAnalysis
examples.py
examples.py
import pandas as pd
bsd-3-clause
Python
e0acea07d77d86313ee2436cdfc96a6258c1991c
Add admin for MembershipPersonRole
swcarpentry/amy,swcarpentry/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,swcarpentry/amy,pbanaszkiewicz/amy
amy/fiscal/admin.py
amy/fiscal/admin.py
from django.contrib import admin

from fiscal.models import MembershipPersonRole


class MembershipPersonRoleAdmin(admin.ModelAdmin):
    list_display = ("name", "verbose_name")
    search_fields = ("name", "verbose_name")


admin.site.register(MembershipPersonRole, MembershipPersonRoleAdmin)
mit
Python
71e66eaebab2dcb6f37ab6c1409bdd357b60db68
Add create-DB script
kevana/ummbNet,kevana/ummbNet,kevana/ummbNet
createDb.py
createDb.py
from ummbNet import *

db.create_all()
mit
Python
6b0f13d9d5a067c116a2f2b17381eadf322dd05b
Add more tests
kemskems/otdet
tests/test_evaluation/test_TopListEvaluator.py
tests/test_evaluation/test_TopListEvaluator.py
from nose.tools import assert_equal, assert_greater

from otdet.evaluation import TopListEvaluator


class TestAddResult:
    def setUp(self):
        self.sample_result = [(5.0, True), (4.0, False), (3.0, True),
                              (2.0, False), (1.0, False)]
        self.M = len(self.sample_result)
        self.n = sum(elm[1] for elm in self.sample_result)

    def test_normal_result(self):
        N = 2
        k = sum(elm[1] for elm in self.sample_result[:N])
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._M, self.M)
        assert_equal(evaluator._n, self.n)
        assert_equal(evaluator._numexpr, 1)
        assert_equal(evaluator._freq[k], 1)

    def test_short_result(self):
        N = 10
        k = sum(elm[1] for elm in self.sample_result[:N])
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._M, self.M)
        assert_equal(evaluator._n, self.n)
        assert_equal(evaluator._numexpr, 1)
        assert_equal(evaluator._freq[k], 1)

    def test_called_twice(self):
        N = 2
        evaluator = TopListEvaluator(N)
        evaluator.add_result(self.sample_result)
        evaluator.add_result(self.sample_result)
        assert_equal(evaluator._numexpr, 2)
        assert_greater(len(evaluator._result_list), 0)
        assert_equal(evaluator._result_list[0], self.sample_result)
mit
Python
60b01719e5780f9adb2cc25e3da60201822bb966
Add SAT object code
VictorLoren/python-sat-solver
SATObject.py
SATObject.py
#
# SAT object that will have work done onto
class SATObject(object):
    """Holds a CNF formula as a variable table plus a list of clauses."""
    # SATObject has only a list of variables (for reference) and a clause list
    def __init__(self):
        # Dictionary in case variable is greater than total number of variables
        self.varDict = {}
        # List of clauses represented with tuples of literals
        self.clauses = []

    # Reads in clause from a line, but assumes every line ends with zero and
    # full clause is listed on this line
    def getClauseFromLine(self, clauseLine):
        # Clause won't contain repeating literals (CNF)
        clause = set()
        # Go over each literal in clause (ignore zero at end)
        for literal in clauseLine.split()[:-1]:
            # Save whether negation (is either 0 or 1)
            isNeg = 1 if (literal[0] == '-') else 0
            # Variable is a literal with (possible) negation removed
            # Add variable to dict as the next integer available for reference
            self.varDict[len(self.varDict)] = literal[isNeg:]
            # Reform literal from new variable notation (2*v or 2*v+1 if neg)
            # Note len of dict is the variable value
            literal = len(self.varDict) << 1 | isNeg
            # Append to the list for this clause
            clause.add(literal)
        # Add this clause into the group of clauses
        self.clauses.append(clause)
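A quick sketch of how the literal encoding above behaves, assuming the class exactly as defined (the input line is invented for illustration): parsing the DIMACS-style clause "1 -2 0" stores two variables and encodes each literal as the variable value shifted left once, with the low bit set when the literal was negated.

# Hypothetical usage of SATObject's literal encoding (not part of the commit).
sat = SATObject()
sat.getClauseFromLine("1 -2 0")
print(sat.varDict)   # {0: '1', 1: '2'}
print(sat.clauses)   # [set([2, 5])]: 1 << 1 = 2 for '1', (2 << 1) | 1 = 5 for '-2'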
mit
Python
0a42c1144fbd7f89914aad2f05f7f1fba7aa3890
Add cuds tests
simphony/simphony-common
simphony/cuds/tests/test_cuds.py
simphony/cuds/tests/test_cuds.py
"""Tests for CUDS data structure.""" import unittest import uuid from simphony import CUDS from simphony.cuds.particles import Particle, Particles class CUDSTestCase(unittest.TestCase): """CUDS class tests.""" def setUp(self): self.cuds = CUDS() # TODO: use generated components class DummyComponent(object): def __init__(self): self.uuid = uuid.uuid4() self.name = 'dummyname' self.data = {} self.dummpy_component1 = DummyComponent() self.dummpy_component2 = DummyComponent() def test_empty_cuds(self): self.assertEqual(len(self.cuds.data), 0) self.assertEqual(self.cuds.get('nonexistentkey'), None) self.assertEqual(self.cuds.data, {}) self.assertRaises(KeyError, self.cuds.remove, 'nonexistentkey') def test_data(self): data = self.cuds.data self.assertEqual(self.cuds.data, data) self.assertIsNot(self.cuds.data, data) def test_get(self): self.assertRaises(TypeError, self.cuds.get, 42) def test_add_get_component(self): self.assertRaises(ValueError, self.cuds.add, object()) self.cuds.add(self.dummpy_component1) self.assertEqual(self.cuds.get(self.dummpy_component1.uuid), self.dummpy_component1) def test_add_dataset(self): p1 = Particle() p2 = Particle() ps = Particles('my particles') ps.add_particles([p1, p2]) self.cuds.add(ps) self.assertEqual(self.cuds.get(ps.name), ps) def test_remove_component(self): self.cuds.add(self.dummpy_component1) self.cuds.remove(self.dummpy_component1.uuid) self.assertIsNone(self.cuds.get(self.dummpy_component1.uuid)) def test_remove_dataset(self): p1 = Particle() p2 = Particle() ps = Particles('my particles') ps.add_particles([p1, p2]) self.cuds.add(ps) self.cuds.remove(ps.name) self.assertIsNone(self.cuds.get(ps.name)) def test_get_names(self): p1 = Particle() p2 = Particle() p3 = Particle() p4 = Particle() ps1 = Particles('M1') ps2 = Particles('M2') ps1.add_particles([p1, p2]) ps2.add_particles([p3, p4]) self.cuds.add(ps1) self.cuds.add(ps2) self.assertEqual(self.cuds.get_names(Particles), ['M1', 'M2']) self.cuds.add(self.dummpy_component1) self.cuds.add(self.dummpy_component2) self.assertEqual(self.cuds.get_names(type(self.dummpy_component1)), [self.dummpy_component1.name, self.dummpy_component2.name]) def test_iter(self): p1 = Particle() p2 = Particle() p3 = Particle() p4 = Particle() ps1 = Particles('M1') ps2 = Particles('M2') ps1.add_particles([p1, p2]) ps2.add_particles([p3, p4]) self.cuds.add(ps1) self.cuds.add(ps2) for item in self.cuds.iter(Particles): self.assertIn(item, [ps1, ps2]) self.cuds.add(self.dummpy_component1) self.cuds.add(self.dummpy_component2) for item in self.cuds.iter(type(self.dummpy_component1)): self.assertIn(item, [self.dummpy_component1, self.dummpy_component2])
bsd-2-clause
Python
63f9f87a3f04cb03c1e286cc5b6d49306f90e352
Add solution for problem 4
gidj/euler,gidj/euler
python/004_largest_palindrome_product/palindrome_product.py
python/004_largest_palindrome_product/palindrome_product.py
from itertools import combinations_with_replacement
from operator import mul

three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
bsd-3-clause
Python
634d703f207d81f817c5bd834e6695d6a439e9a8
fix ImportError with pytest.mark.tf2 (#6050)
yangw1234/BigDL,intel-analytics/BigDL,intel-analytics/BigDL,yangw1234/BigDL,intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL
python/chronos/test/bigdl/chronos/forecaster/tf/__init__.py
python/chronos/test/bigdl/chronos/forecaster/tf/__init__.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apache-2.0
Python
186442a5b50e760f0a3c814cb272c909606ad91a
Create find_factors_down_to_limit.py
Kunalpod/codewars,Kunalpod/codewars
find_factors_down_to_limit.py
find_factors_down_to_limit.py
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Find Factors Down to Limit
#Problem level: 8 kyu

def factors(integer, limit):
    return [x for x in range(limit, (integer//2)+1) if not integer%x] + ([integer] if integer >= limit else [])
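A worked example of the one-liner above (calls invented for illustration): divisors of the input are scanned from limit up to integer//2, and the number itself is appended whenever it clears the limit.

# Hypothetical spot-checks of factors() as defined above:
print(factors(20, 3))   # [4, 5, 10, 20] - divisors 4, 5, 10 in 3..10, plus 20 itself
print(factors(20, 11))  # [20] - nothing in 11..10, but 20 >= 11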
mit
Python
aeeb0e6819439db84f3f7e16ac3f85fd36441315
add unit test
jasonrbriggs/stomp.py,jasonrbriggs/stomp.py
stomp/test/utils_test.py
stomp/test/utils_test.py
import unittest

from stomp.utils import *


class TestUtils(unittest.TestCase):

    def testReturnsTrueWhenLocalhost(self):
        self.assertEquals(1, is_localhost(('localhost', 8000)))
        self.assertEquals(1, is_localhost(('127.0.0.1', 8000)))
        self.assertEquals(2, is_localhost(('192.168.1.92', 8000)))
apache-2.0
Python
e9e06a0b85656eb8ce70aff1ac81737a7ffaece3
Add migration for extended feedback; #909
DMOJ/site,DMOJ/site,DMOJ/site,DMOJ/site
judge/migrations/0083_extended_feedback.py
judge/migrations/0083_extended_feedback.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-15 23:18
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('judge', '0082_remove_profile_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='submissiontestcase',
            name='extended_feedback',
            field=models.TextField(blank=True, verbose_name='extended judging feedback'),
        ),
    ]
agpl-3.0
Python
d410fb26d3fb8bbd843234e90891bee5a5fff7e7
Add local dev settings module
EmadMokhtar/halaqat,EmadMokhtar/halaqat,EmadMokhtar/halaqat
halaqat/settings/local_settings.py
halaqat/settings/local_settings.py
from .base_settings import *

DEBUG = True

LANGUAGE_CODE = 'en'

TIME_FORMAT = [
    '%I:%M %p',
    '%H:%M %p',
]

TIME_INPUT_FORMATS = [
    '%I:%M %p',
    '%H:%M %p'
]
mit
Python
ce6c7a9e474c876829597861ce35b797b2509d42
Add conftest.py for pytest
wcooley/python-gryaml
conftest.py
conftest.py
# This file must exist for pytest to add this directory to `sys.path`.
mit
Python
cca26b50f02f098d3157501bd64e9f990fc061e2
Create solution.py
lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges
leetcode/easy/valid_anagram/py/solution.py
leetcode/easy/valid_anagram/py/solution.py
#
# Anagram definition:
# https://en.wikipedia.org/wiki/Anagram
#
# Classic solution to the anagram problem.
# Sort both strings and check if they are equal.
#
class Solution(object):
    def isAnagram(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        return sorted(s) == sorted(t)
mit
Python
477a57b108499184acb4d74f7aa14b7a8e10f6d8
Create naturalreaderspeech-test.py
MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab
home/CheekyMonkey/naturalreaderspeech-test.py
home/CheekyMonkey/naturalreaderspeech-test.py
# cycle through NaturalReaderSpeech voices
# with i2c connected jaw servo
# Author: Acapulco Rolf
# Date: October 4th 2017
# Build: myrobotlab development build version 2555

from time import sleep
from org.myrobotlab.service import Speech

lang = "EN"  # for NaturalReaderSpeech
Voice = "Ryan"
voiceType = Voice

speech = Runtime.createAndStart("Speech", "NaturalReaderSpeech")
speech.setVoice(voiceType)
speech.setLanguage(lang)

frequency = 50  # 50 Hz servo frequency
adaFruit16c1 = Runtime.createAndStart("AdaFruit16C1", "Adafruit16CServoDriver")
raspi = Runtime.createAndStart("RasPi", "RasPi")
adaFruit16c1.setController("RasPi", "1", "0x40")
adaFruit16c1.setPWMFreq(0, frequency)

jawPin = 8
jawServo = Runtime.createAndStart("jaw", "Servo")
mouth = Runtime.createAndStart("Mouth", "MouthControl")
sleep(20)  # fix for servo attach timing issue as at myrobotlab 236x development builds
jawServo.attach(adaFruit16c1, jawPin, 150, -1)
jaw = mouth.getJaw()
sleep(1)
jaw.attach(adaFruit16c1, jawPin)
jawServo.setMinMax(140, 180)
# set min and max jaw position accordingly for your own use-case
# these min/max settings work for me for this particular jaw: https://www.thingiverse.com/thing:992918
# @Mats, thanks :)
jawServo.setRest(175)
jawServo.moveTo(100)
jawServo.rest()
mouth.setmouth(140, 175)
mouth.autoAttach = False
mouth.setMouth(speech)


def onEndSpeaking(text):
    sleep(.5)
    #Start of main script
    sleep(1)
    speech.speakBlocking(text)
    mouth.jaw.moveTo(175)


def saystuff():
    myvoices = ['Ryan', 'Rich', 'Mike', 'Graham', 'Laura', 'Charles', 'Crystal', 'Heather',
                'Ella', 'Rod', 'Peter', 'Audrey', 'Lucy', 'Rachel', 'Rosy', 'Ryan']
    myvoicescount = len(myvoices)
    for i in range(0, myvoicescount):
        speech.setVoice(myvoices[i])
        onEndSpeaking("I'm completely operational, and all my circuits are functioning perfectly.")


saystuff()
apache-2.0
Python
2875ee60f30ca47a8dc957250125be505e5aee07
Add build script
ruslo/leathers,ruslo/leathers
build.py
build.py
#!/usr/bin/env python3
# Copyright (c) 2014, Ruslan Baratov
# All rights reserved.

import argparse
import os
import re
import shutil
import subprocess
import sys

parser = argparse.ArgumentParser(description="Script for building")

parser.add_argument(
    '--toolchain',
    choices=[
        'libcxx',
        'xcode',
        'clang_libstdcxx',
        'gcc48',
        'vs2013x64',
        'vs2013'
    ],
    help="CMake generator/toolchain",
)

parser.add_argument(
    '--type',
    required=True,
    help="CMake build type",
)

parser.add_argument('--verbose', action='store_true')

args = parser.parse_args()

toolchain = ''
generator = ''
tag = "{}-{}".format(args.toolchain, args.type)

if args.toolchain == 'libcxx':
    toolchain = 'libcxx'
elif args.toolchain == 'xcode':
    toolchain = 'xcode'
    generator = '-GXcode'
    tag = 'xcode'
elif args.toolchain == 'clang_libstdcxx':
    toolchain = 'clang_libstdcxx'
elif args.toolchain == 'gcc48':
    toolchain = 'gcc48'
elif args.toolchain == 'vs2013x64':
    generator = '-GVisual Studio 12 2013 Win64'
    tag = 'vs2013x64'
elif args.toolchain == 'vs2013':
    generator = '-GVisual Studio 12 2013'
    tag = 'vs2013'
else:
    assert(False)

cdir = os.getcwd()


def call(args):
    try:
        print('Execute command: [')
        for i in args:
            print('  `{}`'.format(i))
        print(']')
        subprocess.check_call(
            args,
            stderr=subprocess.STDOUT,
            universal_newlines=True
        )
    except subprocess.CalledProcessError as error:
        print(error)
        print(error.output)
        sys.exit(1)
    except FileNotFoundError as error:
        print(error)
        sys.exit(1)


call(['cmake', '--version'])

polly_root = os.getenv("POLLY_ROOT")
if not polly_root:
    sys.exit("Environment variable `POLLY_ROOT` is empty")

toolchain_option = ''
if toolchain:
    toolchain_path = os.path.join(polly_root, "{}.cmake".format(toolchain))
    toolchain_option = "-DCMAKE_TOOLCHAIN_FILE={}".format(toolchain_path)

build_dir = os.path.join(cdir, '_builds', tag)
build_dir_option = "-B{}".format(build_dir)
build_type_for_generate_step = "-DCMAKE_BUILD_TYPE={}".format(args.type)

shutil.rmtree(build_dir, ignore_errors=True)

generate_command = [
    'cmake',
    '-H.',
    build_dir_option,
    build_type_for_generate_step
]

if generator:
    generate_command.append(generator)

if toolchain_option:
    generate_command.append(toolchain_option)

if args.verbose:
    generate_command.append('-DCMAKE_VERBOSE_MAKEFILE=ON')

build_command = [
    'cmake',
    '--build',
    build_dir,
    '--config',
    args.type
]

call(generate_command)
call(build_command)
bsd-2-clause
Python
53dcffd4677987e6186182484e58fccde1e93d60
change file name
YzPaul3/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,YzPaul3/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,mathemage/h2o-3,mathemage/h2o-3,h2oai/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,YzPaul3/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,YzPaul3/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,spennihana/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,spennihana/h2o-3,h2oai/h2o-dev
h2o-py/test_hadoop/pyunit_hadoop.py
h2o-py/test_hadoop/pyunit_hadoop.py
import sys
sys.path.insert(1, "../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import os


def test_hadoop():
    '''
    Test H2O read and write to hdfs
    '''
    hdfs_name_node = os.getenv("NAME_NODE")

    h2o_data = h2o.import_file("hdfs://" + hdfs_name_node + "/datasets/100k.csv")
    print h2o_data.head()
    h2o_data.summary()

    h2o_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=0.5, Lambda=0.01)
    h2o_glm.train(x=range(1, h2o_data.ncol), y=0, training_frame=h2o_data)

    hdfs_model_path = os.getenv("MODEL_PATH")
    h2o.save_model(h2o_glm, "hdfs://" + hdfs_model_path)
    new_model = h2o.load_model("hdfs://" + hdfs_model_path)


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_hadoop)
else:
    test_hadoop()
apache-2.0
Python
8edf8bbd341c8b3e8395784667da5c577aba7ac6
Add betting.py program
suriya/miscellaneous
ibm-ponder-this/2015-05/betting.py
ibm-ponder-this/2015-05/betting.py
from __future__ import print_function
import itertools
import collections
import sys


class BettingGame(object):
    def __init__(self, max_value=256, num_players=3):
        self.max_value = max_value
        self.num_players = num_players
        self.STOP_STATE = tuple(0 for i in xrange(self.num_players))

    def do_all(self):
        print('Creating states', file=sys.stderr)
        states = set(itertools.imap(
            self.makestate,
            itertools.product(xrange(1, self.max_value + 1),
                              repeat=self.num_players)))
        print('Done creating states', file=sys.stderr)
        reverse_edges = collections.defaultdict(set)
        for state in states:
            for target in self.transitions(state):
                reverse_edges[target].add(state)
        print('Done adding all transitions', file=sys.stderr)
        self.breadth_first(reverse_edges, self.STOP_STATE)

    def makestate(self, s):
        return tuple(sorted(s))

    def transitions(self, state):
        """
        Possible transitions from a state.
        """
        if len(set(state)) < len(state):
            yield self.STOP_STATE
            return
        for hidx in xrange(self.num_players):
            for lidx in xrange(hidx):
                (lower, higher) = (state[lidx], state[hidx])
                yield self.makestate(
                    ((2*lower) if (i == lidx) else
                     ((higher - lower) if (i == hidx) else s))
                    for (i, s) in enumerate(state))

    def breadth_first(self, edges, start):
        # worklist contains (element, distance_from_start)
        worklist = collections.deque()
        worklist.appendleft((start, 0))
        # already_seen contains elements
        already_seen = set([start])
        while worklist:
            (element, distance) = (last_seen, _) = worklist.pop()
            # print('Element, Distance, ', element, distance, file=sys.stderr)
            for neighbor in edges[element]:
                if (neighbor in already_seen):
                    continue
                already_seen.add(neighbor)
                worklist.appendleft((neighbor, distance+1))
        print('Last seen: {}'.format(last_seen))
        print('Distance: {}'.format(distance))


BettingGame(max_value=256).do_all()
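A spot-check of the transition rule above may help: from a state with all-distinct stakes, each bet doubles the lower player's stake and deducts it from the higher player's, after which states are re-sorted. The call below is hypothetical and assumes the class as defined (Python 2, per the xrange usage):

# Hypothetical check of BettingGame.transitions (not part of the commit):
g = BettingGame(max_value=8, num_players=3)
print(sorted(g.transitions((1, 2, 3))))
# [(1, 1, 4), (1, 2, 3), (2, 2, 2)] - one outcome per (lower, higher) pair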
mit
Python
61822398dbd2a3819a15b8c33f1cd69ff2953b5a
Move animation.fill from BiblioPixelAnimation
ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel
bibliopixel/animation/fill.py
bibliopixel/animation/fill.py
from . animation import BaseAnimation
from .. util import colors


class Fill(BaseAnimation):
    """
    Fill the screen with a single color.
    """

    def __init__(self, *args, color='black', **kwds):
        super().__init__(*args, preclear=False, **kwds)
        is_numpy = hasattr(self.color_list, 'dtype')
        self._set_color = self._set_numpy if is_numpy else self._set_classic
        # Apply the initial color so pre_run has a stored value to restore.
        self.color = color

    def pre_run(self):
        self.color = self._color

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, color):
        self._color = colors.make_color(color)
        self._set_color()

    def _set_numpy(self):
        self.color_list[:None] = self._color

    def _set_classic(self):
        self.color_list[:] = [self._color] * len(self.color_list)
mit
Python
eb49c28b790bbf6ce6042f657beaf328a9e6b33f
Add inline sources
drcloud/arx
arx/sources/inline.py
arx/sources/inline.py
from collections import Container, Mapping, OrderedDict, Sequence
import math

from sh import chmod, Command, mkdir, tar

from ..err import Err
from ..decorators import signature
from . import onepath, Source, twopaths


class Inline(Source):
    @onepath
    def cache(self, cache):
        """Caching for inline sources is a no-op."""
        pass


class InlineText(Inline):
    @signature(unicode)
    def __init__(self, text):
        self.text = text

    @twopaths
    def place(self, cache, path):
        mkdir('-p', path.dirname)
        with open(str(path), 'w') as h:
            h.write(self.text.strip() + '\n')

    @onepath
    def run(self, cache, args=[]):
        f = cache.join('data')
        self.place(cache, f)
        chmod('a+rx', str(f))
        cmd = Command(str(f))
        cmd(*args)


class InlineBinary(Inline):
    @signature(bytes)
    def __init__(self, data):
        self.data = data

    @twopaths
    def place(self, cache, path):
        mkdir('-p', path.dirname)
        with open(str(path), 'w') as h:
            h.write(self.data)

    @onepath
    def run(self, cache, args=[]):
        f = cache.join('data')
        self.place(cache, f)
        chmod('a+rx', str(f))
        cmd = Command(str(f))
        cmd(*args)


class InlineTarGZ(InlineBinary):
    @onepath
    def run(self, cache, args=[]):
        raise NoExecutingInlineTars()

    @twopaths
    def place(self, cache, path):
        mkdir('-p', path)
        tar('-xz', '-C', str(path), _in=self.data)


class InlineJar(InlineBinary):
    @onepath
    def run(self, cache, args=[]):
        jar = cache.join('data.jar')
        self.place(cache, jar)
        cmd = Command('java')
        cmd('-jar', str(jar), *args)


class InlineCollection(Inline):
    @signature((Container, OrderedDict))
    def __init__(self, collection):
        self.collection = collection

    @twopaths
    def place(self, _cache, path):
        InlineCollection.unpack_collection(path, self.collection)

    @onepath
    def run(self, _cache, _args=[]):
        raise NoExecutingCollections('Collections can not be executed.')

    @staticmethod
    @onepath
    def unpack_pairs(under, pairs):
        for path, data in pairs:
            full = under.join(path)
            if isinstance(data, Container):
                InlineCollection.unpack_collection(full, data)
            else:
                mkdir('-p', full.dirname)        # TODO: rm -rf, check links, &c
                with open(str(full), 'w') as h:
                    if isinstance(data, bytes):
                        h.write(data)
                    elif hasattr(data, 'read'):
                        h.write(data.read())
                    else:
                        h.write(str(data).strip() + '\n')

    @staticmethod
    def unpack_collection(under, collection):
        pairs = None
        if isinstance(collection, Mapping):
            pairs = collection.items()
        if isinstance(collection, Sequence):
            # width = digits needed to zero-pad every index in the sequence
            fmt = '%0' + str(int(math.ceil(math.log(len(collection), 10)))) + 'd'
            pairs = ((fmt % i, data) for i, data in enumerate(collection))
        if pairs is None:
            raise UnhandledCollection('Collection type %s is not handled.',
                                      type(collection).__name__)
        InlineCollection.unpack_pairs(under, pairs)


class UnhandledCollection(Err):
    pass


class NoExecutingCollections(Err):
    pass


class NoExecutingInlineTars(Err):
    pass
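The zero-padded names computed in unpack_collection keep sequence entries in lexicographic order on disk. A small check of that format string, assuming the fixed '%0Nd' construction above (inputs invented for illustration):

# Hypothetical check of the padded-name format used for sequence entries:
import math
n = 12
fmt = '%0' + str(int(math.ceil(math.log(n, 10)))) + 'd'
print([fmt % i for i in range(n)][:3])   # ['00', '01', '02']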
mit
Python
38651a6f690e39f5d5f64cdd389b031d653dcf95
add migration for credit app status
thelabnyc/django-oscar-wfrs,thelabnyc/django-oscar-wfrs
src/wellsfargo/migrations/0028_auto_20190401_1213.py
src/wellsfargo/migrations/0028_auto_20190401_1213.py
# Generated by Django 2.2 on 2019-04-01 16:13

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('wellsfargo', '0027_auto_20190208_1635'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cacreditapp',
            name='status',
            field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
        ),
        migrations.AlterField(
            model_name='cajointcreditapp',
            name='status',
            field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
        ),
        migrations.AlterField(
            model_name='uscreditapp',
            name='status',
            field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
        ),
        migrations.AlterField(
            model_name='usjointcreditapp',
            name='status',
            field=models.CharField(choices=[('', 'Unknown'), ('E0', 'Approved'), ('E1', 'Pending'), ('E2', 'Format Error'), ('E3', 'Wells Fargo Error'), ('E4', 'Denied')], default='', max_length=2, verbose_name='Application Status'),
        ),
    ]
isc
Python
0cfb4591a7754bcc08edddd17629006b5096d94d
Add handler for /sync API
illicitonion/synapse,howethomas/synapse,howethomas/synapse,matrix-org/synapse,matrix-org/synapse,iot-factory/synapse,iot-factory/synapse,rzr/synapse,howethomas/synapse,rzr/synapse,TribeMedia/synapse,rzr/synapse,matrix-org/synapse,TribeMedia/synapse,matrix-org/synapse,illicitonion/synapse,howethomas/synapse,illicitonion/synapse,rzr/synapse,howethomas/synapse,iot-factory/synapse,rzr/synapse,illicitonion/synapse,TribeMedia/synapse,iot-factory/synapse,matrix-org/synapse,TribeMedia/synapse,TribeMedia/synapse,illicitonion/synapse,matrix-org/synapse,iot-factory/synapse
synapse/handlers/sync.py
synapse/handlers/sync.py
import collections

# imports assumed from the surrounding synapse codebase
from twisted.internet import defer

from synapse.api.constants import Membership
from synapse.streams.config import PaginationConfig

from ._base import BaseHandler


SyncConfig = collections.namedtuple("SyncConfig", [
    "user",
    "device",
    "since",
    "limit",
    "gap",
    "sort",
    "backfill",
    "filter",
])

RoomSyncResult = collections.namedtuple("RoomSyncResult", [
    "room_id",
    "limited",
    "published",
    "prev_batch",
    "events",
    "state",
    "event_map",
])


class SyncResult(collections.namedtuple("SyncResult", [
    "next_batch",  # Token for the next sync
    "private_user_data",  # List of private events for the user.
    "public_user_data",  # List of public events for all users.
    "rooms",  # RoomSyncResult for each room.
])):
    __slots__ = []

    def __nonzero__(self):
        return self.private_user_data or self.public_user_data or self.rooms


class SyncHandler(BaseHandler):

    def __init__(self, hs):
        super(SyncHandler, self).__init__(hs)
        self.event_sources = hs.get_event_sources()

    def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0):
        if timeout == 0:
            return self.current_sync_for_user(sync_config, since_token)
        else:
            def current_sync_callback(since_token):
                return self.current_sync_for_user(sync_config, since_token)
            return self.notifier.wait_for_events(
                sync_config.filter, since_token, current_sync_callback
            )

    def current_sync_for_user(self, sync_config, since_token=None):
        if since_token is None:
            return self.initial_sync(sync_config)
        else:
            return self.incremental_sync(sync_config)

    @defer.inlineCallbacks
    def initial_sync(self, sync_config):
        user = sync_config.user  # assumed to identify the syncing user
        now_token = yield self.event_sources.get_current_token()

        presence_stream = self.event_sources.sources["presence"]
        # TODO (markjh): This looks wrong, shouldn't we be getting the presence
        # UP to the present rather than after the present?
        pagination_config = PaginationConfig(from_token=now_token)
        presence, _ = yield presence_stream.get_pagination_rows(
            user, pagination_config.get_source_config("presence"), None
        )

        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user,
            membership_list=[Membership.INVITE, Membership.JOIN]
        )

        # TODO (markjh): Does public mean "published"?
        published_rooms = yield self.store.get_rooms(is_public=True)
        published_room_ids = set(r["room_id"] for r in published_rooms)

        rooms = []
        for event in room_list:
            messages, token = yield self.store.get_recent_events_for_room(
                event.room_id,
                limit=sync_config.limit,
                end_token=now_token.room_key,
            )
            prev_batch_token = now_token.copy_and_replace("room_key", token[0])
            current_state = yield self.state_handler.get_current_state(
                event.room_id
            )

            rooms.append(RoomSyncResult(
                room_id=event.room_id,
                published=event.room_id in published_room_ids,

    @defer.inlineCallbacks
    def incremental_sync(self, sync_config):
apache-2.0
Python
954b6d2152df52c330d59fe2b3b1cf65f5dd22cf
Create Str2Int_001.py
cc13ny/Allin,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/codirit,Chasego/cod,cc13ny/algo,cc13ny/algo,Chasego/codirit,Chasego/codi,cc13ny/Allin,Chasego/cod,cc13ny/algo,cc13ny/algo,Chasego/codi,Chasego/codi,Chasego/cod,Chasego/codirit,Chasego/codi,Chasego/codirit,Chasego/codirit,Chasego/cod,cc13ny/Allin,Chasego/cod,cc13ny/Allin
leetcode/008-String-to-Integer/Str2Int_001.py
leetcode/008-String-to-Integer/Str2Int_001.py
#@author: cchen
#Terrible code, and it will be updated and simplified later.

class Solution:
    # @param {string} str
    # @return {integer}
    def extractnum(self, ss):
        num = 0
        for i in range(len(ss)):
            if ss[i].isdigit() == False:
                break
            else:
                num = num + 1
        return ss[:num]

    def isoverflow(self, sss, ispos):
        if ispos:
            tmp = '2147483647'
            if len(sss) > len(tmp):
                return True
            elif len(sss) < len(tmp):
                return False
            for j in range(len(tmp)):
                if sss[j] > tmp[j]:
                    return True
                elif sss[j] < tmp[j]:
                    return False
            return False
        else:
            tmp = '2147483648'
            if len(sss) > len(tmp):
                return True
            elif len(sss) < len(tmp):
                return False
            for j in range(len(tmp)):
                if sss[j] > tmp[j]:
                    return True
                elif sss[j] < tmp[j]:
                    return False
            return False

    def myAtoi(self, str):
        str = str.strip()
        if len(str) == 0:
            return 0
        flag = True
        if str[0] == '+':
            str = str[1:]
        elif str[0] == '-':
            str = str[1:]
            flag = False
        if len(str) == 0 or str[0].isdigit() == False:
            return 0
        if flag:
            n = self.extractnum(str)
            if self.isoverflow(n, True) == True:
                return 2147483647
            else:
                return int(n)
        else:
            n = self.extractnum(str)
            if self.isoverflow(n, False) == True:
                return -2147483648
            else:
                return -int(n)
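A few spot-checks of the clamping behaviour implemented above (inputs invented for illustration, assuming the class exactly as written): the digit-by-digit string comparison against '2147483647'/'2147483648' is what clamps out-of-range results to the 32-bit limits.

# Hypothetical usage of Solution.myAtoi:
s = Solution()
print(s.myAtoi('42'))               # 42
print(s.myAtoi('4193 with words'))  # 4193 - extractnum stops at the space
print(s.myAtoi('-91283472332'))     # -2147483648, clamped to INT_MIN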
mit
Python
af6fb23f87651d5cdce3730d2cf2f2b10b571837
test script for ngram matrix creation
juditacs/dsl,juditacs/dsl
dsl/features/create_ngram_matrix.py
dsl/features/create_ngram_matrix.py
from sys import argv

from featurize import Tokenizer, Featurizer


def main():
    N = int(argv[1]) if len(argv) > 1 else 3
    t = Tokenizer()
    f = Featurizer(t, N=N)
    docs = f.featurize_in_directory(argv[2])
    m = f.to_dok_matrix(docs)
    print m.shape


if __name__ == '__main__':
    main()
mit
Python
15ff98ef08fd45354f0df4b4566c240ad84d1c31
add ProductCategory model test
byteweaver/django-eca-catalogue
eca_catalogue/tests/models_tests.py
eca_catalogue/tests/models_tests.py
from django.test import TestCase

from eca_catalogue.tests.models import ProductCategory


class ProductCategoryTestCase(TestCase):
    def test_model(self):
        obj = ProductCategory.add_root(name="cat1", slug="cat1")
        self.assertTrue(obj.pk)
bsd-3-clause
Python
ad74605039052c3dd7d343c84dd1ac24f068b34f
Bump version to 0.3.15
tectronics/coil,tectronics/coil,kovacsbalu/coil,marineam/coil,marineam/coil,kovacsbalu/coil
coil/__init__.py
coil/__init__.py
# Copyright (c) 2005-2006 Itamar Shtull-Trauring.
# Copyright (c) 2008-2009 ITA Software, Inc.
# See LICENSE.txt for details.

"""Coil: A Configuration Library."""

__version_info__ = (0,3,15)
__version__ = ".".join([str(x) for x in __version_info__])
__all__ = ['struct', 'parser', 'tokenizer', 'errors']

from coil.parser import Parser

def parse_file(file_name, **kwargs):
    """Open and parse a coil file.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param file_name: Name of file to parse.
    :type file_name: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    coil = open(file_name)
    return Parser(coil, file_name, **kwargs).root()

def parse(string, **kwargs):
    """Parse a coil string.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param string: String containing data to parse.
    :type string: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    return Parser(string.splitlines(), **kwargs).root()
# Copyright (c) 2005-2006 Itamar Shtull-Trauring.
# Copyright (c) 2008-2009 ITA Software, Inc.
# See LICENSE.txt for details.

"""Coil: A Configuration Library."""

__version_info__ = (0,3,14)
__version__ = ".".join([str(x) for x in __version_info__])
__all__ = ['struct', 'parser', 'tokenizer', 'errors']

from coil.parser import Parser

def parse_file(file_name, **kwargs):
    """Open and parse a coil file.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param file_name: Name of file to parse.
    :type file_name: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    coil = open(file_name)
    return Parser(coil, file_name, **kwargs).root()

def parse(string, **kwargs):
    """Parse a coil string.

    See :class:`Parser <coil.parser.Parser>` for possible keyword arguments.

    :param string: String containing data to parse.
    :type string: str

    :return: The root object.
    :rtype: :class:`Struct <coil.struct.Struct>`
    """
    return Parser(string.splitlines(), **kwargs).root()
mit
Python
54a0ea2024cbfb4924642b5c23c321a0ae083e9e
Add epgen.py
kghoon/epgen
epgen/epgen.py
epgen/epgen.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
epgen runtime
~~~~~~~~~~~~~

:copyright: (c) 2016 by Jihoon Kang <kang@ghoon.net>
:license: Apache 2, see LICENSE for more details
'''

import os
import argparse

from cpgen import *
from confgen import *
from prgen import *


class EpgenRuntime:
    TMPL_DIR = './templates'
    target_dir = './output'
    config_output = './config'
    config_tmpl = 'default'
    install_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
    current_path = os.getcwd()
    config_file = None
    confgen_mode = False

    def start(self):
        self.parse_args()
        if self.confgen_mode:
            self.generate_config()
        else:
            self.read_config()
            self.make_dirs()
            self.generate_classpath()
            self.generate_project()
            self.copy_rest_templates()

    def parse_args(self):
        parser = argparse.ArgumentParser(description='generate eclipse project templates')
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument('--config', nargs='?', help="generate config file with given CONFIG template")
        group.add_argument('config_file', nargs='?', help="generate project templates using the config file")
        parser.add_argument('--workroot', nargs='?', help="project root directory", default="<work-root>")
        parser.add_argument('--name', nargs='?', help="project name", default="<project-name>")
        parser.add_argument('--buildtarget', nargs='?', help="build target. alticast build system specific information", default="<build-target>")
        args = parser.parse_args()

        if not args.config_file:
            self.confgen_mode = True
            if args.config:
                self.config_tmpl = args.config
        else:
            self.config_file = args.config_file

        self.project_name = args.name
        self.project_rootdir = args.workroot
        self.build_target = args.buildtarget

    def generate_config(self):
        tmpl_dir = '%s/configs' % self.install_path
        generate_config(self.project_name, self.project_rootdir, self.build_target,
                        self.config_tmpl, tmpl_dir)

    def read_config(self):
        self.configs = read_config(self.config_file)
        self.target_dir = self.configs['name']

    def make_dirs(self):
        os.system("mkdir -p %s/settings" % self.target_dir)

    def generate_classpath(self):
        generate_classpath(self.configs, "%s/.classpath" % self.target_dir)

    def generate_project(self):
        tmpl_dir = '%s/templates' % self.install_path
        generate_project(self.configs, "%s/.project" % self.target_dir, tmpl_dir)

    def copy_rest_templates(self):
        settings_dir = "%s/settings" % self.TMPL_DIR
        for root, dirs, files in os.walk(settings_dir):
            for f in files:
                src_file = os.path.join(root, f)
                dst_dir = os.path.join('%s/settings/' % self.target_dir)
                os.system("cp -f %(src_file)s %(dst_dir)s" % locals())


if __name__ == '__main__':
    EpgenRuntime().start()
apache-2.0
Python
3498ddd7817e72b3f6f0b851fa94e82047cb9129
Create the config file if doesn't exist
meetmangukiya/chubby,meetmangukiya/chubby
chubby/config.py
chubby/config.py
import os


def create_if_not_exists():
    """
    Create the config file if it doesn't exist already.
    """
    # check if it exists
    if not os.path.exists(os.path.join(os.path.expanduser("~"), '.chubby')):
        os.chdir(os.path.expanduser("~"))
        # create file
        with open(".chubby", 'a'):
            pass
mit
Python
96476a32e545184908f64aac41b23987255138e2
Create new package. (#6623)
tmerrick1/spack,iulian787/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,krafczyk/spack,LLNL/spack,LLNL/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,EmreAtes/spack,EmreAtes/spack,LLNL/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,matthiasdiener/spack,tmerrick1/spack,tmerrick1/spack,EmreAtes/spack,LLNL/spack,krafczyk/spack
var/spack/repos/builtin/packages/py-htseq/package.py
var/spack/repos/builtin/packages/py-htseq/package.py
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class PyHtseq(PythonPackage):
    """HTSeq is a Python package that provides infrastructure to process
    data from high-throughput sequencing assays."""

    homepage = "http://htseq.readthedocs.io/en/release_0.9.1/overview.html"
    url = "https://github.com/simon-anders/htseq/archive/release_0.9.1.tar.gz"

    version('0.9.1', '269e7de5d39fc31f609cccd4a4740e61')

    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-pysam', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-cython', type=('build', 'run'))
    depends_on('swig', type=('build', 'run'))
lgpl-2.1
Python
5503e1f54298a5b6121e35794d43c6642b3af6e0
Add lc0340_longest_substring_with_at_most_k_distinct_characters.py
bowen0701/algorithms_data_structures
lc0340_longest_substring_with_at_most_k_distinct_characters.py
lc0340_longest_substring_with_at_most_k_distinct_characters.py
"""Leetcode 340. Longest Substring with At Most K Distinct Characters Hard URL: https://leetcode.com/problems/longest-substring-with-at-most-k-distinct-characters/ Given a string, find the length of the longest substring T that contains at most k distinct characters. Example 1: Input: s = "eceba", k = 2 Output: 3 Explanation: T is "ece" which its length is 3. Example 2: Input: s = "aa", k = 1 Output: 2 Explanation: T is "aa" which its length is 2. """ class Solution(object): def lengthOfLongestSubstringKDistinct(self, s, k): """ :type s: str :type k: int :rtype: int """ pass def main(): pass if __name__ == '__main__': main()
bsd-2-clause
Python
6da928b7e113e30af0da0aa5b18d48c9584a631d
add script
lukeolson/donut
ditto.py
ditto.py
#!/usr/local/bin/python3
"""
The purpose of this script is to update dot files somewhere.

It works in the following way. Two locations are set:

dothome : ($HOME)
    absolute path where the dotfiles are set

dotarchive : ($HOME/.dotarchive)
    absolute path to the dot files (usually some git archive)

Then symlinks are made from dothome to dotarchive.

Simple as that.
"""


def main():
    # import os
    # dothome = os.path.expanduser('~')
    # dotarchive = os.path.join(dothome, '.dotarchive')
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("dothome", help="absolute path to the dotfiles")
    parser.add_argument("dotarchive", help="absolute path to the dotfile archive")
    args = parser.parse_args()

    print(args.dothome)
    print(args.dotarchive)


if __name__ == "__main__":
    main()
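The docstring above describes symlinking dothome to dotarchive, but main() currently only echoes its arguments. A minimal sketch of that linking step, as a hypothetical helper not present in the commit:

import os

def link_dotfiles(dothome, dotarchive):
    # For each entry in the archive, create <dothome>/.<name> -> <dotarchive>/<name>.
    for name in os.listdir(dotarchive):
        src = os.path.join(dotarchive, name)
        dst = os.path.join(dothome, '.' + name)
        if os.path.islink(dst):
            os.remove(dst)  # replace a stale link
        os.symlink(src, dst)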
mit
Python
0d0bf5b67f432fd4ee182b9026ea6e319babf9bd
Create ChamBus_create_database.py
cclauss/sql_o_matic
ChamBus_create_database.py
ChamBus_create_database.py
# coding: utf-8

# https://github.com/ChamGeeks/GetAroundChamonix/blob/master/www/js/services/TripPlanner.js

import datetime, os, requests, sqlite3

db_filename = 'ChamBus.db'
db_url = 'https://chx-transit-db.herokuapp.com/api/export/sql'

if os.path.exists(db_filename):
    exit(db_filename + ' already exists. Rename or delete it and rerun this script.')

print('Initializing {}...'.format(db_filename))
start = datetime.datetime.now()
with sqlite3.connect(db_filename) as conn:
    print('Reading sql commands from: {} ...'.format(db_url))
    cursor = conn.executescript(requests.get(db_url).text)
    print('Database tables are:')
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    print('\n'.join(sorted(x[0] for x in cursor.fetchall())))
    conn.commit()
print('Elapsed time: {}'.format(datetime.datetime.now() - start))
print('=====\nDone.')
apache-2.0
Python
a7b31346835c8fdd1724432596650a6de137fe3f
test read_meta
shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl
test/Python/test_Func.py
test/Python/test_Func.py
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin')))

from file_def import read_meta

import unittest


class BasicTestSuite(unittest.TestCase):
    def test_read_meta(self):
        meta_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../data/SraRunTable.txt'))
        meta_dic = read_meta(meta_file, 0, 25)
        print(meta_dic)


if __name__ == '__main__':
    unittest.main()
apache-2.0
Python
c8d48e9996f048b1844258ef427c4359645521c6
Create solution.py
lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges
leetcode/easy/length_of_last_word/py/solution.py
leetcode/easy/length_of_last_word/py/solution.py
class Solution(object):
    def lengthOfLastWord(self, s):
        """
        :type s: str
        :rtype: int
        """
        words = s.split()

        if len(words) > 0:
            return len(words[-1])

        return 0
mit
Python
a84f965e16e68cb8973d6cc91fbacec56bb92a64
add lottery.py
lnmds/jose
ext/lottery.py
ext/lottery.py
import decimal
import logging

import discord
from discord.ext import commands

from .common import Cog

log = logging.getLogger(__name__)

# Keep the rate as a Decimal so the arithmetic below stays in Decimal.
PERCENTAGE_PER_TAXBANK = decimal.Decimal('0.2') / 100
TICKET_PRICE = 20


class Lottery(Cog):
    """Weekly lottery.

    The lottery works with you buying a 20JC lottery ticket.
    Every Saturday, a winner is chosen from the people who bought a ticket.

    The winner gets 0.2% of money from all taxbanks.
    """
    def __init__(self, bot):
        super().__init__(bot)
        self.ticket_coll = self.config.jose_db['lottery']

    @commands.group(aliases=['l'], invoke_without_command=True)
    async def lottery(self, ctx):
        """Show current lottery state.

        Reading 'j!help Lottery' is highly recommended.
        """
        amount = decimal.Decimal(0)

        async for account in self.jcoin.all_accounts('taxbank'):
            amount += PERCENTAGE_PER_TAXBANK * account['amount']

        await ctx.send('Next saturday you have a chance to win: '
                       f'`{amount:.2}JC`')

    @lottery.command()
    async def users(self, ctx):
        """Show the users that are in the current lottery."""
        em = discord.Embed()
        users = []

        async for ticket in self.ticket_coll.find():
            users.append(f'<@{ticket["user_id"]}>')

        em.add_field(name='Users', value='\n'.join(users))
        await ctx.send(embed=em)

    @lottery.command()
    async def enter(self, ctx, amount: decimal.Decimal):
        """Enter the weekly lottery."""
        await ctx.send('not implemented yet')
        # Check if the user is in jose guild
        # Pay 20jc to jose
        # put user in ticket collection
        # send message to #lottery-log


def setup(bot):
    bot.add_cog(Lottery(bot))
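A quick sanity check of the prize-pool arithmetic in the lottery command above (taxbank balances invented for illustration): each taxbank contributes 0.2% of its balance, so three banks holding 10,000JC, 25,000JC and 15,000JC yield a pot of 0.002 * 50,000 = 100JC.

# Hypothetical prize-pool check, mirroring the Decimal arithmetic above:
from decimal import Decimal

rate = Decimal('0.2') / 100                     # 0.2% per taxbank
balances = [Decimal(10000), Decimal(25000), Decimal(15000)]
pot = sum(rate * b for b in balances)
print(f'`{pot:.2f}JC`')                         # `100.00JC`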
mit
Python
210429b1acbb099479c06f5bd4ceddfabfa6ee5c
Create qualysguard_remediation_ignore_non-running_kernels.py
paragbaxi/qualysguard_remediation_ignore_by_report_template
qualysguard_remediation_ignore_non-running_kernels.py
qualysguard_remediation_ignore_non-running_kernels.py
#!/usr/bin/env python
apache-2.0
Python
c92954f240ef990eae06967c12426367f0eb6319
Add migration
safwanrahman/readthedocs.org,tddv/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,tddv/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,tddv/readthedocs.org,pombredanne/readthedocs.org
readthedocs/donate/migrations/0003_add-impressions.py
readthedocs/donate/migrations/0003_add-impressions.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('donate', '0002_dollar-drop-choices'),
    ]

    operations = [
        migrations.CreateModel(
            name='SupporterImpressions',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField(verbose_name='Date')),
                ('offers', models.IntegerField(default=0, verbose_name='Offer')),
                ('views', models.IntegerField(default=0, verbose_name='View')),
                ('clicks', models.IntegerField(default=0, verbose_name='Clicks')),
                ('promo', models.ForeignKey(related_name='impressions', blank=True, to='donate.SupporterPromo', null=True)),
            ],
        ),
    ]
mit
Python
dc7cf288c5c5c9733a59184770fbaa26db036833
Add basic tests for custom_urls system
whalerock/ella,petrlosa/ella,WhiskeyMedia/ella,whalerock/ella,whalerock/ella,MichalMaM/ella,petrlosa/ella,MichalMaM/ella,ella/ella,WhiskeyMedia/ella
tests/unit_project/test_core/test_custom_urls.py
tests/unit_project/test_core/test_custom_urls.py
# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase

from django.http import Http404

from ella.core.custom_urls import DetailDispatcher


# dummy functions to register as views
def view(request, bits, context):
    return request, bits, context

def custom_view(request, context):
    return request, context


class TestCustomUrlsDispatcher(UnitTestCase):
    def setUp(self):
        self.dispatcher = DetailDispatcher()

        self.context = {'object': self}
        self.request = object()

    def test_no_extension(self):
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)

    def test_register_global_extension(self):
        self.dispatcher.register('start', view)
        self.assert_equals(view, self.dispatcher._get_view('start', self))

    def test_register_extension_for_model(self):
        self.dispatcher.register('another_start', view, model=self.__class__)
        self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))

    def test_register_extension_for_model_not_work_for_other_models(self):
        self.dispatcher.register('start', view, model=self.__class__)
        self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())

    def test_no_custom_view(self):
        self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)

    def test_register_custom_view(self):
        self.dispatcher.register_custom_detail(self.__class__, custom_view)
        self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
bsd-3-clause
Python
861120c5ba7e6e126cac13497a489bc035d27026
add partition show
you21979/mysql_batch
bin/partition_show.py
bin/partition_show.py
#!/usr/bin/python
import datetime
import MySQLdb
import json
import os

CONFIG_FILE = "partition.json"

# -----------------------------------
def config_read(filename):
    config = json.load(open(filename))
    return config

# -----------------------------------
def date_show_all_partitions(conn, tablename):
    lists = []
    infotable = "information_schema.PARTITIONS"
    sql = "SELECT PARTITION_NAME FROM " + infotable + " WHERE TABLE_NAME='" + tablename + "' ORDER BY PARTITION_NAME desc;"
    cur = conn.cursor()
    cur.execute(sql)
    res = cur.fetchall()
    for row in res:
        lists.append(row[0])
    cur.close()
    return lists

def partition_exec(conn, table):
    lists = date_show_all_partitions(conn, table)
    for v in lists:
        if v == "pmax":
            continue
        print table + ":" + v

def main():
    path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), "config")
    conf = config_read(os.path.join(path, CONFIG_FILE))
    myconf = conf["MYSQL"]
    conn = MySQLdb.connect(host=myconf["HOST"], db=myconf["DB"], user=myconf["USER"], passwd=myconf["PASS"])
    for table in conf["TABLES"]:
        partition_exec(conn, table)
    conn.close()

main()
apache-2.0
Python
c50a7189e730fc3e95eb209eed00ebdcd7001bde
Create ImgurStorage.py
jmahmood/django-imgurstorage
ImgurStorage.py
ImgurStorage.py
import base64
import os
import tempfile

from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File
from django.utils._os import safe_join
import requests
from django.core.files.storage import Storage
from imgurpython import ImgurClient


class ImgurStorage(Storage):
    """
    Uses the Imgur cloud service to store images. Great for Heroku.
    This is just a gist, needs some work.
    """
    client_id = "LOL"
    client_secret = "LOL"
    access_token = "LOL"
    refresh_token = "LOL"

    def upload(self, path):
        return self.client.upload_from_path(path)

    def __init__(self):
        super(ImgurStorage, self).__init__()
        self.client = ImgurClient(self.client_id, self.client_secret, self.access_token, self.refresh_token)

    def _open(self, name, mode='rb'):
        file_url = "http://i.imgur.com/{0}.png".format(name)
        r = requests.get(file_url)
        f = tempfile.NamedTemporaryFile(delete=False)
        for chunk in r.iter_content(chunk_size=512 * 1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
        f.close()
        return File(f)

    def uploaded_path(self, name):
        try:
            path = safe_join(self.location, name)
        except ValueError:
            raise SuspiciousFileOperation("Attempted access to '%s' denied." % name)
        return os.path.normpath(path)

    def get_available_name(self, name):
        return name

    def _save(self, name, content):
        """
        Saves new content to the file specified by name. The content should be
        a proper File object or any python file-like object, ready to be read
        from the beginning.
        """
        # Get the proper name for the file, as it will actually be saved.
        if name is None:
            name = content.name

        if not hasattr(content, 'chunks'):
            content = File(content)

        content.open()
        data = {
            'image': base64.b64encode(content.read()),
            'type': 'base64',
            'meta': {}
        }
        ret = self.client.make_request('POST', 'upload', data, True)
        content.close()
        return ret["id"]

    def url(self, name):
        return "http://i.imgur.com/{0}.png".format(name)

    def get_valid_name(self, name):
        return name

    def exists(self, name):
        return True
mit
Python
c153bc9422308599d1354abf782273ca7bd78952
Add a few unit tests for libvirt_conn.
n0ano/ganttclient
nova/tests/virt_unittest.py
nova/tests/virt_unittest.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova import test
from nova.virt import libvirt_conn

FLAGS = flags.FLAGS


class LibvirtConnTestCase(test.TrialTestCase):
    def test_get_uri_and_template(self):
        class MockDataModel(object):
            def __init__(self):
                self.datamodel = {'name': 'i-cafebabe',
                                  'memory_kb': '1024000',
                                  'basepath': '/some/path',
                                  'bridge_name': 'br100',
                                  'mac_address': '02:12:34:46:56:67',
                                  'vcpus': 2}

        type_uri_map = {
            'qemu': ('qemu:///system',
                     [lambda s: '<domain type=\'qemu\'>' in s,
                      lambda s: 'type>hvm</type' in s,
                      lambda s: 'emulator>/usr/bin/kvm' not in s]),
            'kvm': ('qemu:///system',
                    [lambda s: '<domain type=\'kvm\'>' in s,
                     lambda s: 'type>hvm</type' in s,
                     lambda s: 'emulator>/usr/bin/qemu<' not in s]),
            'uml': ('uml:///system',
                    [lambda s: '<domain type=\'uml\'>' in s,
                     lambda s: 'type>uml</type' in s]),
        }

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)

            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, expected_uri)

            for i, check in enumerate(checks):
                xml = conn.toXml(MockDataModel())
                self.assertTrue(check(xml), '%s failed check %d' % (xml, i))

        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, testuri)
apache-2.0
Python
07500dbd92aa15540ddf77b96a7072c5f66d34b2
Add files via upload
BenChehade/datasciences
heat_map.py
heat_map.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 17:27:18 2017

@author: DWyatt
"""

import pandas as pd
import seaborn as sns
import sys

df_train = pd.read_csv('train.csv')
target = 'SalePrice'
variables = [column for column in df_train.columns if column != target]

corr = df_train.corr()
sns_heat = sns.heatmap(corr, square=True)
fig = sns_heat.get_figure()
fig.savefig('heat.png')

print([target])
print(variables)
#sys.exit()

#sns_pair = sns.pairplot(df_train,
#                        x_vars=['SalePrice'],
#                        y_vars=['LotFrontage', 'Neighborhood'])
#fig = sns_pair.get_figure()
#fig.savefig('pair.png')
mit
Python
e755977ee0ada391149e55d3331bf2ffe045d243
Add a build configuration test for zlib, for #187
pycurl/pycurl,pycurl/pycurl,pycurl/pycurl
examples/tests/test_build_config.py
examples/tests/test_build_config.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et

import pycurl
import zlib
try:
    from io import BytesIO
except ImportError:
    try:
        from cStringIO import StringIO as BytesIO
    except ImportError:
        from StringIO import StringIO as BytesIO

c = pycurl.Curl()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
#c.setopt(c.ENCODING, 'deflate')
c.setopt(c.HTTPHEADER, ['Accept-Encoding: deflate'])
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server supports deflate encoding')
encoded = body.getvalue()
# should not raise exceptions
zlib.decompress(encoded, -zlib.MAX_WBITS)
print('Server served deflated body')

c.reset()
c.setopt(c.URL, 'http://pycurl.sourceforge.net')
c.setopt(c.ENCODING, 'deflate')
body = BytesIO()
c.setopt(c.WRITEFUNCTION, body.write)
encoding_found = False
def header_function(header):
    global encoding_found
    if header.decode('iso-8859-1').lower().startswith('content-encoding: deflate'):
        encoding_found = True
c.setopt(c.HEADERFUNCTION, header_function)
c.perform()
assert encoding_found
print('Server claimed deflate encoding as expected')
# body should be decoded
encoded = body.getvalue()
if '<html' in encoded.decode('iso-8859-1').lower():
    print('Curl inflated served body')
else:
    fail = False
    try:
        zlib.decompress(encoded, -zlib.MAX_WBITS)
        print('Curl did not inflate served body')
        fail = True
    except:
        print('Weird')
        fail = True
    if fail:
        assert False
c.close()
lgpl-2.1
Python
9f1c5612c717bac3690d093a27a0a362ff4793b4
add parameters class for fitting data
licode/scikit-beam,scikit-xray/scikit-xray,Nikea/scikit-xray,licode/scikit-xray,CJ-Wright/scikit-beam,scikit-xray/scikit-xray,hainm/scikit-xray,ericdill/scikit-xray,danielballan/scikit-xray,licode/scikit-beam,licode/scikit-xray,tacaswell/scikit-xray,giltis/scikit-xray,tacaswell/scikit-xray,tacaswell/scikit-xray,giltis/scikit-xray,scikit-xray/scikit-xray,ericdill/scikit-xray,licode/scikit-beam,yugangzhang/scikit-beam,ericdill/scikit-xray,danielballan/scikit-xray,celiafish/scikit-xray,danielballan/scikit-xray,tacaswell/scikit-beam,celiafish/scikit-xray,celiafish/scikit-xray,tacaswell/scikit-beam,giltis/scikit-xray,tacaswell/scikit-beam,Nikea/scikit-xray,licode/scikit-xray,hainm/scikit-xray,yugangzhang/scikit-beam,CJ-Wright/scikit-beam,Nikea/scikit-xray,hainm/scikit-xray,yugangzhang/scikit-beam,CJ-Wright/scikit-beam
nsls2/fitting/parameters.py
nsls2/fitting/parameters.py
# Copyright (c) Brookhaven National Lab 2014
# All rights reserved
# BSD License
# See LICENSE for full text
# @author: Li Li (lili@bnl.gov)
# created on 07/20/2014


class ParameterBase(object):
    """
    base class to save data structure for each fitting parameter
    """
    def __init__(self):
        self.val = None
        self.min = None
        self.max = None
        return


class Parameters(object):

    def __init__(self):
        self.p_dict = {}
        return

    def add(self, **kwgs):
        if kwgs.has_key('name'):
            self.p_dict[kwgs['name']] = ParameterBase()
            if kwgs.has_key('val'):
                self.p_dict[kwgs['name']].val = kwgs['val']
            if kwgs.has_key('min'):
                self.p_dict[kwgs['name']].min = kwgs['min']
            if kwgs.has_key('max'):
                self.p_dict[kwgs['name']].max = kwgs['max']
        else:
            print "please define parameter name first."
            print "please define parameters as %s, %s, %s, %s" \
                % ('name', 'val', 'min', 'max')
        return

    def __getitem__(self, name):
        return self.p_dict[name]

    def all(self):
        return self.p_dict
bsd-3-clause
Python
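For illustration, a brief usage sketch of the Parameters container above, in the module's own Python 2 style; the parameter name and bounds are invented:

    p = Parameters()
    p.add(name='amplitude', val=1.0, min=0.0, max=10.0)
    print p['amplitude'].val   # 1.0
    print p.all().keys()       # ['amplitude']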
17de6f90ce081984cab528526fcf9d9e7008be14
Create beta_scraping_get_users_honor.py
Orange9000/Codewars,Orange9000/Codewars
Solutions/beta/beta_scraping_get_users_honor.py
Solutions/beta/beta_scraping_get_users_honor.py
from bs4 import BeautifulSoup as BS
from urllib.request import urlopen

Url = 'https://www.codewars.com/users/leaderboard'

def get_honor(username):
    html = urlopen(Url).read().decode('utf-8')
    soup = BS(html, 'html.parser')
    for i in soup.find_all('tr'):
        try:
            a = str(i).split('</td>')
            user = a[0][19:(a[0].find('>') - 1)]
            if user == username:
                return int(a[-2][4:])
        except:
            continue
    return "Username not found!"
mit
Python
8c737c22ae5d896f5445995660d664d959ce1c08
add ctc reader
qingqing01/models,PaddlePaddle/models,lcy-seso/models,Superjom/models-1,kuke/models,qingqing01/models,PaddlePaddle/models,Superjom/models-1,Superjom/models-1,PaddlePaddle/models,qingqing01/models,kuke/models,lcy-seso/models,kuke/models,lcy-seso/models,kuke/models
fluid/ocr_recognition/ctc_reader.py
fluid/ocr_recognition/ctc_reader.py
import os
import cv2
import numpy as np
from paddle.v2.image import load_image


class DataGenerator(object):
    def __init__(self):
        pass

    def train_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for training.

        :param img_root_dir: The root path of the image for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for training.
        :type file_list: str
        '''
        # sort by height, e.g. idx
        img_label_lines = []
        for line in open(img_label_list):
            # h, w, img_name, labels
            items = line.split(' ')
            idx = "{:0>5d}".format(int(items[0]))
            img_label_lines.append(idx + ' ' + line)
        img_label_lines.sort()

        def reader():
            for line in img_label_lines:
                # h, w, img_name, labels
                items = line.split(' ')[1:]
                assert len(items) == 4
                label = [int(c) for c in items[-1].split(',')]
                img = load_image(os.path.join(img_root_dir, items[2]))
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]
                yield img, label

        return reader

    def test_reader(self, img_root_dir, img_label_list):
        '''
        Reader interface for inference.

        :param img_root_dir: The root path of the images for training.
        :type file_list: str

        :param img_label_list: The path of the <image_name, label> file for testing.
        :type file_list: list
        '''
        def reader():
            for line in open(img_label_list):
                # h, w, img_name, labels
                items = line.split(' ')
                assert len(items) == 4
                label = [int(c) for c in items[-1].split(',')]
                img = load_image(os.path.join(img_root_dir, items[2]))
                img = np.transpose(img, (2, 0, 1))
                #img = img[np.newaxis, ...]
                yield img, label

        return reader
apache-2.0
Python
90c7f90a8d409fd68ebe20ed4ac35fd378abfee5
Create flush.py
jluzuria2001/codeSnippets,jluzuria2001/codeSnippets,jluzuria2001/codeSnippets,jluzuria2001/codeSnippets
flush.py
flush.py
f = open('out.log', 'w+')
f.write('output is ')
# some work
s = 'OK.'
f.write(s)
f.write('\n')
f.flush()
# some other work
f.write('done\n')
f.flush()
f.close()
mit
Python
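A note on the snippet above: flush() only empties Python's userspace buffer into the OS page cache. If durability on disk is the point, a sketch of the stronger pattern (same file name assumed):

    import os

    with open('out.log', 'w+') as f:
        f.write('output is OK.\n')
        f.flush()              # push Python's buffer to the OS
        os.fsync(f.fileno())   # ask the OS to commit the data to disk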
ea11ae8919139eae8eaa6b9b1dfe256726d3c584
Copy SBSolarcell tests into individual file
jrsmith3/ibei,jrsmith3/tec,jrsmith3/tec
test/test_SBSolarcell.py
test/test_SBSolarcell.py
# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest

temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15

input_params = {"temp_sun": temp_sun,
                "temp_planet": temp_earth,
                "bandgap": bandgap,
                "voltage": 0.5,}


class CalculatorsReturnUnits(unittest.TestCase):
    """
    Tests units of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_blackbody_radiant_power_density(self):
        """
        calc_blackbody_radiant_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)

    def test_calc_power_density(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)

    def test_calc_power_density_zero_bandgap(self):
        """
        calc_power_density should return value with unit of W m^-2.
        """
        self.solarcell.bandgap = 0
        tested_unit = self.solarcell.calc_power_density().unit
        target_unit = units.Unit("W/m2")
        self.assertEqual(tested_unit, target_unit)


class CalculatorsReturnType(unittest.TestCase):
    """
    Tests type of the calculator methods returned values.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_efficiency(self):
        """
        calc_efficiency should return a float.
        """
        self.assertIsInstance(self.solarcell.calc_efficiency(), float)


class CalculatorsReturnValue(unittest.TestCase):
    """
    Tests special values of the calculator methods.
    """
    def setUp(self):
        """
        Initialize SBSolarcell object from input_params
        """
        self.solarcell = ibei.SQSolarcell(input_params)

    def test_calc_power_density(self):
        """
        calc_power_density should return 0 when bandgap = 0.
        """
        self.solarcell.bandgap = 0
        self.assertEqual(0, self.solarcell.calc_power_density())


if __name__ == "__main__":
    pass
mit
Python
a973b1daca340031c671070e0f102a6114f58fab
add files
MuzammilKhan/Ventriloquy,MuzammilKhan/Ventriloquy,MuzammilKhan/Ventriloquy,MuzammilKhan/Ventriloquy
mysite/wordclips/ventriloquy/test_ventriloquy.py
mysite/wordclips/ventriloquy/test_ventriloquy.py
from django.test import TestCase
from wordclips.ventriloquy.ventriloquy import Ventriloquy
from wordclips.models import Wordclip


class VentriloquyTestCase(TestCase):
    def setUp(self):
        self.ventriloquy = Ventriloquy()
        # Put dummy objects in the database for testing purposes
        Wordclip.objects.create(name="how")
        Wordclip.objects.create(name="are")
        Wordclip.objects.create(name="you")
        Wordclip.objects.create(name="people")

    def test_found_in_db(self):
        err, lst = self.ventriloquy.check_words(["how", "are", "you"])
        o1 = Wordclip.objects.get(name="how")
        o2 = Wordclip.objects.get(name="are")
        o3 = Wordclip.objects.get(name="you")
        self.assertEqual(err, 0)
        self.assertEqual(lst, [o1, o2, o3])

    def test_not_found_in_db(self):
        """
        Test objects not being found in the database; the first word
        that cannot be found is returned
        """
        err, lst = self.ventriloquy.check_words(["how", "shooot"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "shooot")

    def test_creating_audio_success(self):
        """
        Test audio being successfully created
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people"])
        self.assertEqual(err, 0)
        self.assertEqual(lst, [])

    def test_creating_audio_failed(self):
        """
        Test audio creation failing
        """
        err, lst = self.ventriloquy.create_audio(["how", "are", "you", "people", "damn", "it"])
        self.assertEqual(err, -1)
        self.assertEqual(lst, "damn")
mit
Python
8fd466ecd16db736177104902eb84f661b2b62cc
Create sitemap for google news
jeanmask/opps,opps/opps,YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,williamroot/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,opps/opps,jeanmask/opps,williamroot/opps,jeanmask/opps
opps/sitemaps/googlenews.py
opps/sitemaps/googlenews.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site


class GoogleNewsSitemap(GenericSitemap):
    # That's Google News limit. Do not increase it!
    limit = 1000
    sitemap_template = 'sitemap_googlenews.xml'

    def get_urls(self, page=1, site=None):
        if site is None:
            site = Site.objects.get_current()
        sup = super(GoogleNewsSitemap, self)
        old_urls = sup.get_urls(page, site)
        urls = []
        for item in self.paginator.page(page).object_list:
            for url in old_urls:
                loc = "http://%s%s" % (site.domain, self.location(item))
                if url.get('location') == loc:
                    old_urls.remove(url)
                    url['item'] = item
                    urls.append(url)
        return urls
mit
Python
2a106a12db2a59ccb0517a13db67b35f475b3ef5
Add args to survey_data url
chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork
apps/survey/urls.py
apps/survey/urls.py
from django.conf.urls.defaults import *

from . import views

urlpatterns = patterns('',
    url(r'^profile/$', views.profile_index, name='survey_profile'),
    url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
    url(r'^main/$', views.main_index),
    url(r'^group_management/$', views.group_management, name='group_management'),
    url(r'^survey_management/$', views.survey_management, name='survey_management'),
    url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
    #url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
    url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
    url(r'^$', views.index, name='survey_index'),
)
from django.conf.urls.defaults import *

from . import views

urlpatterns = patterns('',
    url(r'^profile/$', views.profile_index, name='survey_profile'),
    url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
    url(r'^main/$', views.main_index),
    url(r'^group_management/$', views.group_management, name='group_management'),
    url(r'^survey_management/$', views.survey_management, name='survey_management'),
    url(r'^survey_data/$', views.survey_data, name='survey_management'),
    url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
    url(r'^$', views.index, name='survey_index'),
)
agpl-3.0
Python
b85b8915b73433f74d8ee5c6f6ef9f88d8b82bd8
add original py script
UC3Music/songbook-tools,UC3Music-e/genSongbook,UC3Music/genSongbook
genSongbook.py
genSongbook.py
#!/usr/bin/python

import os

f = open('songbook.tex', 'w')

s = """% songbook.tex
%\documentclass[11pt,a4paper]{article} % article format
\documentclass[11pt,a4paper,openany]{book} % book format
\usepackage[margin=0.7in]{geometry}
%\usepackage[utf8]{inputenc} % tildes
\usepackage{graphics}
\usepackage[dvips]{graphicx}
\usepackage{hyperref}
\usepackage{verbatim}
% next if more than 100 chapters.
\usepackage{titletoc}
\\titlecontents{chapter}[2.5em]
{\\addvspace{0.5pc}\\bfseries}
{\contentslabel{2em}}
{}
{\\titlerule*[0.3pc]{.}\contentspage}
\hypersetup{
    pdftitle={Songbook (English) - Summer 2014},
    pdfsubject={Songbook (English) - Summer 2014},
    pdfauthor={jgvictores},
    pdfkeywords={songbook} {english} {summer} {2014},
    colorlinks,
    citecolor=black,
    filecolor=black,
    linkcolor=black,
    urlcolor=black,
    bookmarks
}
\makeatletter
\\renewcommand{\@makechapterhead}[1]{%
{\setlength{\parindent}{0pt} \\raggedright \\normalfont
\\bfseries S-\\thechapter.\ #1
\par\\nobreak\\vspace{10 pt}}}
\makeatother
\\begin{document}
\Large
\\title{Songbook (English)}
\\author{by -j}
\date{Summer 2014}
\maketitle
\cleardoublepage
\\tableofcontents
\\newpage % book format
%-- To force blank page:
%\\newpage
%\\thispagestyle{empty}
%\\mbox{}
"""

for dirname, dirnames, filenames in os.walk('/opt/Dropbox/lyrics/english'):
    for filename in sorted(filenames):
        s += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
        name, extension = os.path.splitext(filename)
        s += "\chapter{" + name + "}\n"
        s += "\\begin{verbatim}\n"
        song = open(os.path.join(dirname, filename))
        s += song.read()
        s += "\\end{verbatim}\n"
        s += "\n"

s += """
\end{document}
"""

f.write(s)
unlicense
Python
ac5b1181ff73b9d12c09731a646dac7fa23c342b
Add Weatherbit module
DesertBot/DesertBot
desertbot/modules/commands/weather/Weatherbit.py
desertbot/modules/commands/weather/Weatherbit.py
from collections import OrderedDict
from datetime import datetime

from twisted.plugin import IPlugin
from zope.interface import implementer

from desertbot.moduleinterface import IModule
from desertbot.modules.commands.weather.BaseWeatherCommand import BaseWeatherCommand, getFormattedWeatherData, \
    getFormattedForecastData


@implementer(IPlugin, IModule)
class Weatherbit(BaseWeatherCommand):
    weatherBaseURL = "https://api.weatherbit.io/v2.0"

    def __init__(self):
        subCommands = OrderedDict([
            ('weather', self.getWeather),
            ('forecast', self.getForecast)])
        BaseWeatherCommand.__init__(self, "Weatherbit", subCommands)

    def triggers(self):
        return ["weatherbit"]

    def getWeather(self, location) -> str:
        return self._handleCommand("weather", location)

    def getForecast(self, location) -> str:
        return self._handleCommand("forecast", location)

    def _handleCommand(self, subCommand, location) -> str:
        request = subCommand
        params = {
            "lat": location["latitude"],
            "lon": location["longitude"],
            "units": "M",
            "key": self.apiKey
        }
        if subCommand == "weather":
            request = "current"
        if subCommand == "forecast":
            request = "forecast/daily"
            params["days"] = 4
        url = "{}/{}".format(self.weatherBaseURL, request)

        result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, params)
        output = None
        if not result:
            output = "No weather for this location could be found at this moment. Try again later."
        else:
            j = result.json()
            if "data" not in j or "count" in j and j["count"] == 0:
                output = "The Weatherbit API returned an unknown reply."
            else:
                if subCommand == "weather":
                    output = _parseWeather(j)
                elif subCommand == "forecast":
                    output = _parseForecast(j)
        return output


def _parseWeather(json):
    data = json["data"][0]
    weatherData = {
        "weatherCode": data["weather"]["code"],
        "description": data["weather"]["description"],
        "tempC": data["temp"],
        "humidity": data["rh"],
        "windSpeedMs": data["wind_spd"],
        "timestamp": data["ts"],
        "windDir": data["wind_dir"]
    }
    return getFormattedWeatherData(weatherData)


def _parseForecast(json):
    daysList = json["data"]
    forecastData = []
    for day in daysList:
        forecastData.append({
            "date": datetime.fromtimestamp(day['ts']).strftime("%A"),
            "minC": day["low_temp"],
            "maxC": day["max_temp"],
            "weatherCode": day["weather"]["code"],
            "description": day["weather"]["description"],
        })
    return getFormattedForecastData(forecastData)


weatherbitCommand = Weatherbit()
mit
Python
f3a6281098b11ddd353a394d914186d5c7683f9b
add jupyter module
RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_nlu
rasa/jupyter.py
rasa/jupyter.py
import pprint as pretty_print
from typing import Any, Dict, Text, TYPE_CHECKING

from rasa_core.utils import print_success, print_error

if TYPE_CHECKING:
    from rasa_core.agent import Agent
    from rasa_core.interpreter import NaturalLanguageInterpreter


def pprint(object: Any):
    pretty_print.pprint(object, indent=2)


def chat(model: Text = None,
         agent: 'Agent' = None,
         interpreter: 'NaturalLanguageInterpreter' = None):
    if model:
        from rasa.run import create_agent
        agent = create_agent(model)
    elif agent and interpreter:
        agent.set_interpreter(interpreter)
    else:
        print_error("You either have to define a model path or an agent and "
                    "an interpreter.")

    print("Your bot is ready to talk! Type your messages here or send '/stop'.")
    while True:
        message = input()
        if message == '/stop':
            break

        for response in agent.handle_text(message):
            _display_bot_response(response)


def _display_bot_response(response: Dict):
    from IPython.display import Image, display

    for response_type, value in response.items():
        if response_type == 'text':
            print_success(value)

        if response_type == 'image':
            image = Image(url=value)
            display(image)
apache-2.0
Python
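A possible way to call the module above from a notebook cell; the model archive path is made up:

    from rasa.jupyter import chat

    # Point chat() at a trained Rasa model archive (path is hypothetical).
    chat(model="models/20190101-120000.tar.gz")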
d8407723f9bf40ca166e5471e76c03c257bc71f9
Add lc208_implement_trie_prefix_tree.py
bowen0701/algorithms_data_structures
lc208_implement_trie_prefix_tree.py
lc208_implement_trie_prefix_tree.py
"""Leetcode 208. Implement Trie (Prefix Tree) Medium URL: https://leetcode.com/problems/implement-trie-prefix-tree/ Implement a trie with insert, search, and startsWith methods. Example: Trie trie = new Trie(); trie.insert("apple"); trie.search("apple"); // returns true trie.search("app"); // returns false trie.startsWith("app"); // returns true trie.insert("app"); trie.search("app"); // returns true Note: You may assume that all inputs are consist of lowercase letters a-z. All inputs are guaranteed to be non-empty strings. Your Trie object will be instantiated and called as such: obj = Trie() obj.insert(word) param_2 = obj.search(word) param_3 = obj.startsWith(prefix) """ class Trie(object): def __init__(self): """ Initialize your data structure here. """ pass def insert(self, word): """ Inserts a word into the trie. :type word: str :rtype: None """ pass def search(self, word): """ Returns if the word is in the trie. :type word: str :rtype: bool """ pass def startsWith(self, prefix): """ Returns if there is any word in the trie that starts with the given prefix. :type prefix: str :rtype: bool """ pass def main(): pass if __name__ == '__main__': main()
bsd-2-clause
Python
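The file above is only a skeleton. One way the three methods could be filled in, using nested dicts with a sentinel end-of-word key (the '#' marker is an arbitrary choice):

    class Trie(object):
        def __init__(self):
            # Each node is a dict of child characters; '#' marks end of a word.
            self.root = {}

        def insert(self, word):
            node = self.root
            for ch in word:
                node = node.setdefault(ch, {})
            node['#'] = True

        def search(self, word):
            node = self._find(word)
            return node is not None and '#' in node

        def startsWith(self, prefix):
            return self._find(prefix) is not None

        def _find(self, word):
            # Walk the nested dicts; return None as soon as a char is missing.
            node = self.root
            for ch in word:
                if ch not in node:
                    return None
                node = node[ch]
            return node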
8a7c3ad110c00e6049fa452634d06a6873a36f90
Add an examples folder.
koenedaele/skosprovider
examples/api.py
examples/api.py
# -*- coding: utf-8 -*-
'''
This example demonstrates the skosprovider API with a simple
DictionaryProvider containing just three items.
'''

from skosprovider.providers import DictionaryProvider
from skosprovider.uri import UriPatternGenerator
from skosprovider.skos import ConceptScheme

larch = {
    'id': '1',
    'uri': 'http://id.trees.org/1',
    'labels': [
        {'type': 'prefLabel', 'language': 'en', 'label': 'The Larch'},
        {'type': 'prefLabel', 'language': 'nl', 'label': 'De Lariks'}
    ],
    'notes': [
        {'type': 'definition', 'language': 'en', 'note': 'A type of tree.'}
    ],
    'member_of': ['3'],
    'matches': {
        'close': ['http://id.python.org/different/types/of/trees/nr/1/the/larch']
    }
}

chestnut = {
    'id': '2',
    'uri': 'http://id.trees.org/2',
    'labels': [
        {'type': 'prefLabel', 'language': 'en', 'label': 'The Chestnut'},
        {'type': 'altLabel', 'language': 'nl', 'label': 'De Paardekastanje'},
        {'type': 'altLabel', 'language': 'fr', 'label': 'la châtaigne'}
    ],
    'notes': [
        {
            'type': 'definition', 'language': 'en',
            'note': 'A different type of tree.'
        }
    ],
    'member_of': ['3'],
    'matches': {
        'related': ['http://id.python.org/different/types/of/trees/nr/17/the/other/chestnut']
    }
}

species = {
    'id': 3,
    'uri': 'http://id.trees.org/3',
    'labels': [
        {'type': 'prefLabel', 'language': 'en', 'label': 'Trees by species'},
        {'type': 'prefLabel', 'language': 'nl', 'label': 'Bomen per soort'}
    ],
    'type': 'collection',
    'members': ['1', '2'],
    'notes': [
        {
            'type': 'editorialNote',
            'language': 'en',
            'note': 'As seen in How to Recognise Different Types of Trees from Quite a Long Way Away.'
        }
    ]
}

provider = DictionaryProvider(
    {
        'id': 'TREES',
        'default_language': 'nl',
        'subject': ['biology']
    },
    [larch, chestnut, species],
    uri_generator=UriPatternGenerator('http://id.trees.org/types/%s'),
    concept_scheme=ConceptScheme('http://id.trees.org')
)

# Get a concept or collection by id
print(provider.get_by_id(1).label().label)

# Get a concept or collection by uri
print(provider.get_by_uri('http://id.trees.org/types/1'))

# Get all concepts and collections in a provider
# If possible, show a Dutch (as spoken in Belgium) label
print(provider.get_all(language='nl-BE'))

# Get the top concepts in a provider
print(provider.get_top_concepts())

# Find anything that has a label of 'The Larch'
print(provider.find({'label': 'The Larch'}))

# Get the top of a display hierarchy
print(provider.get_top_display())

# Get the children to display in a hierarchy concept 1
# If possible, show a French (as spoken in Belgium) label
print(provider.get_children_display(3, language='fr-BE'))

# Get all concepts underneath a concept or collection
print(provider.expand(3))
mit
Python
1b00a597d8145b2df05054fef8d072d452209463
Make SurfaceHandler (for sfc data)
gciteam6/xgboost,gciteam6/xgboost
src/data/surface.py
src/data/surface.py
from glob import glob

# Third-party modules
import pandas as pd

# Hand-made modules
from base import LocationHandlerBase

SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
    "index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
    "index_col": 0,
    "na_values": ['', ' ']
}


class SurfaceHandler(LocationHandlerBase):
    def __init__(self,
                 sfc_master_filepath,
                 sfc_file_prefix="sfc_",
                 sfc_file_suffix=".tsv"):
        super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
        self.sfc_file_prefix = sfc_file_prefix
        self.sfc_file_suffix = sfc_file_suffix
        self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME

    def read_tsv(self, path_or_buf):
        df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
        df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
        return df_ret

    def to_tsv(self, df, path_or_buf, **kwargs):
        df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))

    def gen_filepath_list(self, aid_list):
        sfc_regex_filepath_list = [
            self.path.join(
                self.INTERIM_DATA_BASEPATH,
                self.SFC_REGEX_DIRNAME,
                self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
            ) for aid in aid_list
        ]

        return [
            sfc_file
            for sfc_regex_filepath in sfc_regex_filepath_list
            for sfc_file in glob(sfc_regex_filepath)
        ]

    def retrive_data(self, filepath_list, name_list):
        if len(filepath_list) < 1:
            raise ValueError("Empty list ?")

        df_ret = self.read_tsv(filepath_list[0])
        df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]

        if len(filepath_list) > 1:
            for filepath, name in zip(filepath_list[1:], name_list[1:]):
                df_ret = df_ret.merge(
                    self.read_tsv(filepath),
                    how="outer",
                    left_index=True,
                    right_index=True,
                    suffixes=(".", "_{}".format(name))
                )

        return df_ret


if __name__ == '__main__':
    print("Surface!")
mit
Python
c025cd6649e2326ade7b81df8408c4363fdb2050
add music handler
free-free/pyblog,free-free/pyblog,free-free/pyblog,free-free/pyblog
app/music_handler.py
app/music_handler.py
#-*- coding:utf-8 -*-
from tools.httptools import Route
from models import Music


@Route.get("/music")
def get_music_handler(app):
    ret = {}
    ret['code'] = 200
    ret['msg'] = 'ok'
    ret['type'] = 3
    ret['data'] = [
        {'music_name': 'CountingStars',
         'music_url': 'http://7xs7oc.com1.z0.glb.clouddn.com/music%2FJason%20Chen%20-%20Counting%20Stars.mp3'},
    ]
    return ret


@Route.post("/music")
def post_music_handler(app):
    return 'ok'
mit
Python
3091555ca7fc421f886a1df1ac28f677feb70a53
Add default value for the fields object and field of the social network app model
rebearteta/social-ideation,rebearteta/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,joausaga/social-ideation,joausaga/social-ideation
app/migrations/0006_auto_20150825_1513.py
app/migrations/0006_auto_20150825_1513.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0005_auto_20150819_1054'),
    ]

    operations = [
        migrations.AlterField(
            model_name='socialnetworkapp',
            name='field_real_time_updates',
            field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='socialnetworkapp',
            name='object_real_time_updates',
            field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
            preserve_default=True,
        ),
    ]
mit
Python
f05bd26c7a275c38c092c821e5ef62284c36e783
Test transformation matrices
laputian/dml
test/test_interpolate.py
test/test_interpolate.py
import pywt
import sys
import numpy as np
from scipy.ndimage.interpolation import affine_transform

sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist
from skimage import transform as tf

test_data = load_mnist()[2]
chosen_index = 7
test_x_chosen = test_data[0][chosen_index]
test_y_chosen = test_data[1][chosen_index]

transm = np.eye(28, k=0) + np.eye(28, k=1)
pic_arr = test_x_chosen.reshape((28, 28))
pic_trans = np.dot(pic_arr, transm)

import matplotlib.pyplot as plt
import matplotlib.cm as cm

plt.subplot(2, 1, 1)
plt.imshow(pic_arr, cmap=cm.Greys_r, interpolation='nearest')
plt.subplot(2, 1, 2)
plt.imshow(pic_trans, cmap=cm.Greys_r, interpolation='nearest')
plt.show()
mit
Python
47cbcf130e76604ed93306f02fc2221a276d3bbf
Split out
cropleyb/pentai,cropleyb/pentai,cropleyb/pentai
pentai/gui/spacer.py
pentai/gui/spacer.py
from kivy.uix.widget import Widget


class HSpacer(Widget):
    pass


class VSpacer(Widget):
    pass
mit
Python
e37a616d23805ced7250d4cdd6422751d8ae5143
Add populate_anticrispr.py
goyalsid/phageParser,mbonsma/phageParser,phageParser/phageParser,phageParser/phageParser,mbonsma/phageParser,mbonsma/phageParser,phageParser/phageParser,goyalsid/phageParser,goyalsid/phageParser,mbonsma/phageParser,phageParser/phageParser
phageAPI/populate_anticrispr.py
phageAPI/populate_anticrispr.py
#! /usr/bin/env python

import os

from Bio import SeqIO
import textwrap


def populate(sequences, AntiCRISPR):
    for seq in sequences:
        spacer, _ = AntiCRISPR.objects.get_or_create(
            accession=seq.name,
            sequence=str(seq.seq))
        spacer.save()


def main():
    import argparse
    parser = argparse.ArgumentParser(description=textwrap.dedent("""\
        Import anticrispr sequences into the API DB.

        To use, first get the list of accession numbers from
        https://www.nature.com/articles/nmicrobiol201685. This list is
        available locally in `data/antiCRISPR_accessions.txt`. The script
        `acc2gb.py` can then be used to download the antiCRISPR protein
        sequence in fasta format, assuming you have NCBI access:

            cat data/antiCRISPR_accessions.txt | python acc2gb.py your@email.com protein fasta > anticrispr.txt

        Finally, populate the database with the accession numbers in the
        accession field and the sequences in the sequence field:

            cd phageAPI
            populate_anticrispr.py ../anticrispr.txt
        """),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('sequences', metavar='FILE', nargs=1,
                        help='path to sequences file, in fasta format')
    args = parser.parse_args()

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
    import django
    django.setup()
    from restapi.models import AntiCRISPR

    populate(SeqIO.parse(args.sequences[0], 'fasta'), AntiCRISPR)


if __name__ == '__main__':
    main()
mit
Python
333cbe13d8104934a924f223427fa06a60a8b080
Create php4dvd_1.py
x0mak/test---project---python---Kurbatova
php4dvd/php4dvd_1.py
php4dvd/php4dvd_1.py
# -*- coding: utf-8 -*-
from selenium import webdriver
# These exception classes are used by the helpers below but were missing.
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest


class Untitled(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://localhost:8080/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_untitled(self):
        driver = self.driver
        driver.get(self.base_url + "/php4dvd/")
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("admin")
        driver.find_element_by_name("password").clear()
        driver.find_element_by_name("password").send_keys("admin")
        driver.find_element_by_name("submit").click()

    def is_element_present(self, how, what):
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e:
            return False
        return True

    def is_alert_present(self):
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException, e:
            return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
apache-2.0
Python
d894e39e0280aaa45cef914f2202e978797b26fb
Update and rename 2 to 28.py
krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects
exercises/28.py
exercises/28.py
'''
Write a function find_longest_word() that takes a list of words
and returns the length of the longest one. Use only higher order
functions.
'''

def find_longest_word(lst):
    return len(max(lst, key=len))
mit
Python
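A quick check of the function above, with an invented word list:

    print(find_longest_word(['python', 'is', 'fun']))  # prints 6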
183b0c573478ff5e2480758abec629ddce4f0766
Create missing migration for model Meta changes in 9d1e29150407e906bc651a8249c53e5e6d1fb1e7.
mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service
atmo/jobs/migrations/0035_auto_20170529_1424.py
atmo/jobs/migrations/0035_auto_20170529_1424.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-29 14:24
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('jobs', '0034_auto_20170529_1424'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='sparkjobrun',
            options={'get_latest_by': 'created_at', 'ordering': ['-created_at']},
        ),
    ]
mpl-2.0
Python