column        type           values
commit        stringlengths  40 to 40
subject       stringlengths  4 to 1.73k
repos         stringlengths  5 to 127k
old_file      stringlengths  2 to 751
new_file      stringlengths  2 to 751
new_contents  stringlengths  1 to 8.98k
old_contents  stringlengths  0 to 6.59k
license       stringclasses  13 values
lang          stringclasses  23 values
04142e3bd0c09e6f712669529e780a18c11c7076
Add script to validate files are valid DocBook.
savinash47/openstack-doc-tools,openstack/openstack-doc-tools,openstack/openstack-doc-tools,savinash47/openstack-doc-tools,savinash47/openstack-doc-tools
validate.py
validate.py
#!/usr/bin/env python
'''
Usage:
    validate.py [path]

Validates all xml files against the DocBook 5 RELAX NG schema.

Options:
    path     Root directory, defaults to <repo root>/doc/src/doc/docbkx

Ignores pom.xml files and subdirectories named "target".

Requires Python 2.7 or greater (for argparse) and the lxml Python library.
'''

from lxml import etree

import argparse
import os
import subprocess
import sys
import urllib2


def get_schema():
    """Return the DocBook RELAX NG schema"""
    url = "http://www.docbook.org/xml/5.0/rng/docbookxi.rng"
    relaxng_doc = etree.parse(urllib2.urlopen(url))
    return etree.RelaxNG(relaxng_doc)


def validation_failed(schema, doc):
    """Return True if the parsed doc fails against the schema

    This will ignore validation failures of the type: IDREF attribute linkend
    references an unknown ID. This is because we are validating individual
    files that are being imported, and sometimes the reference isn't present
    in the current file."""
    return not schema.validate(doc) and \
        any(log.type_name != "DTD_UNKNOWN_ID" for log in schema.error_log)


def error_message(error_log):
    """Return a string that contains the error message.

    We use this to filter out false positives related to IDREF attributes
    """
    errs = [str(x) for x in error_log if x.type_name != 'DTD_UNKNOWN_ID']

    # Reverse output so that earliest failures are reported first
    errs.reverse()
    return "\n".join(errs)


def main(rootdir):
    schema = get_schema()

    any_failures = False
    for root, dirs, files in os.walk(rootdir):
        # Don't descend into 'target' subdirectories
        try:
            ind = dirs.index('target')
            del dirs[ind]
        except ValueError:
            pass

        for f in files:
            # Ignore maven files, which are called pom.xml
            if f.endswith('.xml') and f != 'pom.xml':
                try:
                    path = os.path.abspath(os.path.join(root, f))
                    doc = etree.parse(path)
                    if validation_failed(schema, doc):
                        any_failures = True
                        print error_message(schema.error_log)
                except etree.XMLSyntaxError as e:
                    any_failures = True
                    print "%s: %s" % (path, e)

    if any_failures:
        sys.exit(1)


def default_root():
    """Return the location of openstack-manuals/doc/src/docbkx

    The current working directory must be inside of the openstack-manuals
    repository for this method to succeed"""
    args = ["git", "rev-parse", "--show-toplevel"]
    gitroot = subprocess.check_output(args).rstrip()
    return os.path.join(gitroot, "doc/src/docbkx")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Validate XML files against "
                                     "the DocBook 5 RELAX NG schema")
    parser.add_argument('path', nargs='?', default=default_root(),
                        help="Root directory that contains DocBook files, "
                        "defaults to `git rev-parse --show-toplevel`/doc/src/"
                        "docbkx")
    args = parser.parse_args()
    main(args.path)
apache-2.0
Python
7cb62f554fa293a2ba4d0456ed8d04e8f277d2c1
Add migrations/0146_clean_lexeme_romanised_3.py
lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public
ielex/lexicon/migrations/0146_clean_lexeme_romanised_3.py
ielex/lexicon/migrations/0146_clean_lexeme_romanised_3.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations


def forwards_func(apps, schema_editor):
    Lexeme = apps.get_model("lexicon", "Lexeme")
    replaceMap = {
        'λ': 'ʎ',
        'φ': 'ɸ'
    }
    for lexeme in Lexeme.objects.all():
        if len(set(replaceMap.keys()) & set(lexeme.romanised)):
            for k, v in replaceMap.items():
                lexeme.romanised = lexeme.romanised.replace(k, v)
            lexeme.save()


def reverse_func(apps, schema_editor):
    pass


class Migration(migrations.Migration):
    dependencies = [('lexicon', '0145_fix_language_distributions')]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
bsd-2-clause
Python
8f1b1ef01e74782f57da9c9489a3a7f6555bbee6
Add tests for reports views.
birkbeckOLH/annotran,birkbeckOLH/annotran,birkbeckOLH/annotran
annotran/reports/test/views_test.py
annotran/reports/test/views_test.py
# -*- coding: utf-8 -*-

import mock
import pytest
from pyramid import httpexceptions

from annotran.reports import views

_SENTINEL = object()
mit
Python
691ae15cb0f46400762c27305fb74f57fa1ffccf
Implement account.py
ThomasLee969/net.tsinghua,ThomasLee969/net.tsinghua,ThomasLee969/net.tsinghua
src/account.py
src/account.py
from datetime import datetime
from hashlib import md5
from re import match, search, DOTALL

from requests.sessions import Session
from bs4 import BeautifulSoup

BASE_URL = 'https://usereg.tsinghua.edu.cn'
LOGIN_PAGE = BASE_URL + '/do.php'
INFO_PAGE = BASE_URL + '/user_info.php'


class Account(object):
    """Tsinghua Account"""
    def __init__(self, username, password, is_md5=False):
        super(Account, self).__init__()
        self.username = username

        if is_md5:
            if len(password) != 32:
                raise ValueError('Length of a MD5 string must be 32')
            self.md5_pass = password
        else:
            self.md5_pass = md5(password.encode()).hexdigest()

        # Account information.
        self.name = ''
        self.id = ''

        # Balance & usage.
        self.balance = 0
        self.ipv4_byte = 0
        self.ipv6_byte = 0
        self.last_check = None

        # Status.
        self.valid = False

    def check(self):
        try:
            s = Session()
            payload = dict(action='login',
                           user_login_name=self.username,
                           user_password=self.md5_pass)
            login = s.post(LOGIN_PAGE, payload)

            if not login:
                # Not a normal response, maybe the server is down?
                return False

            if login.text == 'ok':
                self.valid = True
                self.update_infos(s)
            else:
                self.valid = False

            # Checking complete.
            self.last_check = datetime.today()
            return True
        except:
            # Things happened so checking did not finish.
            return False

    def update_infos(self, session):
        # Parse HTML.
        soup = BeautifulSoup(session.get(INFO_PAGE).text, 'html.parser')
        blocks = map(BeautifulSoup.get_text, soup.select('.maintd'))
        i = map(str.strip, blocks)  # Only works in python 3.
        infos = dict(zip(i, i))

        self.name = infos['姓名']
        self.id = infos['证件号']
        self.balance = head_float(infos['帐户余额'])
        self.ipv4_byte = head_int(infos['使用流量(IPV4)'])
        self.ipv6_byte = head_int(infos['使用流量(IPV6)'])

    def __repr__(self):
        return '<Account(%s, %s, %sB, ¥%s, %s)>' % (self.username,
                                                    self.valid,
                                                    self.ipv4_byte,
                                                    self.balance,
                                                    self.last_check)


def head_int(s):
    return int(match(r'\d+', s).group())


def head_float(s):
    return float(match(r'\d+(\.\d+)?', s).group())


if __name__ == '__main__':
    acc = Account("lisihan13", "1L2S3H@th")
    acc.check()
    print(acc)
mit
Python
940299a7bfd967653899b176ce76e6f1cf02ca83
Add script to generate pairs of LIWC categories
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
liwcpairs2es.py
liwcpairs2es.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch, helpers
from collections import Counter
from datetime import datetime


def find_pairs(list1, list2):
    pairs = []
    if list1 and list2:
        for item1 in list1:
            for item2 in list2:
                pairs.append(u'{}@{}'.format(item1, item2))
    return pairs


es = Elasticsearch()

index_name = 'embem'
doc_type = 'event'

cat1 = 'Body'
cat2 = 'Posemo'

timestamp = datetime.now().isoformat()

pairs_count = Counter()
years = {}

q = {
    "query": {
        "wildcard": {"text_id": "*"}
    }
}

results = helpers.scan(client=es, query=q, index=index_name,
                       doc_type=doc_type)

for r in results:
    # get tags
    cat1_tags = r.get('_source').get('liwc-entities').get('data').get(cat1)
    cat2_tags = r.get('_source').get('liwc-entities').get('data').get(cat2)

    # find all pairs
    pairs = find_pairs(cat1_tags, cat2_tags)

    if pairs:
        for pair in pairs:
            pairs_count[pair] += 1

            year = r.get('_source').get('year')
            if year not in years.keys():
                years[year] = Counter()
            years[year][pair] += 1

        # save pairs to ES
        doc = {
            'doc': {
                'pairs-{}-{}'.format(cat1, cat2): {
                    'data': pairs,
                    'num_pairs': len(pairs),
                    'timestamp': timestamp
                }
            }
        }

        es.update(index=index_name, doc_type=doc_type, id=r.get('_id'),
                  body=doc)

sorted_years = years.keys()
sorted_years.sort()

print '{}\t{}\tFrequency'.format(cat1, cat2) + \
    ''.join(['\t{}'.format(k) for k in sorted_years])
print 'TOTAL\tTOTAL\t{}'.format(sum(pairs_count.values())) + \
    ''.join(['\t{}'.format(sum(years[k].values())) for k in sorted_years])
for p, f in pairs_count.most_common():
    (w1, w2) = p.split('@')
    print u'{}\t{}\t{}'.format(w1, w2, f).encode('utf-8') + \
        ''.join(['\t{}'.format(years[k][p]) for k in sorted_years])
apache-2.0
Python
73292532767d736a77ec8b122cfd4ff19b7d991b
Create Account dashboard backend
lakewik/storj-gui-client
UI/account_dash.py
UI/account_dash.py
# -*- coding: utf-8 -*-

import threading

from PyQt4 import QtCore, QtGui

from qt_interfaces.account_dash_ui import Ui_AccountDash

from engine import StorjEngine

from utilities.tools import Tools


# Synchronization menu section #
class AccountDashUI(QtGui.QMainWindow):

    def __init__(self, parent=None,):
        QtGui.QWidget.__init__(self, parent)
        self.account_dash_ui = Ui_AccountDash()
        self.account_dash_ui.setupUi(self)

        self.storj_engine = StorjEngine()  # init StorjEngine
        self.tools = Tools()

        self.initialize_buckets_stats_table()

        self.createNewBucketsStatsGetThread()

    def createNewBucketsStatsGetThread(self):
        thread = threading.Thread(target=self.fill_buckets_stats_table, args=())
        thread.start()

    def initialize_buckets_stats_table(self):
        self.table_header = ['Bucket name', 'Files count', 'Total used space']
        self.account_dash_ui.buckets_stats_table.setColumnCount(3)
        self.account_dash_ui.buckets_stats_table.setRowCount(0)
        horHeaders = self.table_header
        self.account_dash_ui.buckets_stats_table.setHorizontalHeaderLabels(horHeaders)
        self.account_dash_ui.buckets_stats_table.resizeColumnsToContents()
        self.account_dash_ui.buckets_stats_table.resizeRowsToContents()
        self.account_dash_ui.buckets_stats_table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)

    def fill_buckets_stats_table(self):
        total_files_size = 0
        total_files_count = 0

        for bucket in self.storj_engine.storj_client.bucket_list():
            total_bucket_files_size = 0
            total_bucket_files_count = 0

            # fill table
            table_row_count = self.account_dash_ui.buckets_stats_table.rowCount()
            self.account_dash_ui.buckets_stats_table.setRowCount(
                table_row_count + 1)

            for file in self.storj_engine.storj_client.bucket_files(bucket_id=bucket.id):
                total_bucket_files_size += int(file['size'])
                total_bucket_files_count += 1

            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 0, QtGui.QTableWidgetItem(bucket.name))
            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 1, QtGui.QTableWidgetItem(str(total_bucket_files_count)))
            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 2, QtGui.QTableWidgetItem(str(self.tools.human_size(total_bucket_files_size))))

            total_files_count += total_bucket_files_count
            total_files_size += total_bucket_files_size

        self.account_dash_ui.files_total_count.setText(str(total_files_count))
        self.account_dash_ui.total_used_space.setText(str(self.tools.human_size(total_files_size)))
mit
Python
53258a9ffd869dd958fd818874b2c8406acca143
add pytest for util.store
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
pytests/util/test_store.py
pytests/util/test_store.py
import pytest

import util.store


@pytest.fixture
def emptyStore():
    return util.store.Store()


@pytest.fixture
def store():
    return util.store.Store()


def test_get_of_unset_key(emptyStore):
    assert emptyStore.get("any-key") == None
    assert emptyStore.get("any-key", "default-value") == "default-value"


def test_get_of_set_key(store):
    store.set("key", "value")
    assert store.get("key") == "value"


def test_overwrite_set(store):
    store.set("key", "value 1")
    store.set("key", "value 2")
    assert store.get("key") == "value 2"


def test_unused_keys(store):
    store.set("key 1", "value x")
    store.set("key 2", "value y")
    assert store.unused_keys() == sorted(["key 1", "key 2"])
    store.get("key 2")
    assert store.unused_keys() == ["key 1"]
    store.get("key 1")
    assert store.unused_keys() == []

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
mit
Python
5bde5b5904abc30506e56865cd58fd88a97942aa
Add `deprecated` decorator
tavultesoft/keymanweb,tavultesoft/keymanweb
linux/keyman-config/keyman_config/deprecated_decorator.py
linux/keyman-config/keyman_config/deprecated_decorator.py
#!/usr/bin/python3

# based on https://stackoverflow.com/a/40301488

import logging

string_types = (type(b''), type(u''))


def deprecated(reason):
    if isinstance(reason, string_types):
        # The @deprecated is used with a 'reason'.
        def decorator(func1):
            def new_func1(*args, **kwargs):
                logging.warning("Call to deprecated function '{name}': {reason}.".format(
                    name=func1.__name__, reason=reason))
                return func1(*args, **kwargs)
            return new_func1
        return decorator
    else:
        # The @deprecated is used without any 'reason'.
        def new_func2(*args, **kwargs):
            func2 = reason
            logging.warning("Call to deprecated function '{name}'.".format(name=func2.__name__))
            return func2(*args, **kwargs)
        return new_func2
apache-2.0
Python
f6519493dd75d7f5a8b65a952b5d7048bd101ec4
Create locationanalysis.py
MagnoMithos/GoogleMapsLocationAnalysis
locationanalysis.py
locationanalysis.py
import json

print 'test'

f = open('location.json', 'r')
jsoncontent = f.read()
print jsoncontent

location = json.loads(jsoncontent)
print len(location)
mit
Python
f6d417e69efa4554008bc441a5c82a5b9f93a082
Add sql.conventions.objects.Items
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
garage/sql/conventions/objects.py
garage/sql/conventions/objects.py
__all__ = [
    'Items',
]

from garage.functools import nondata_property
from garage.sql.utils import insert_or_ignore, make_select_by


class Items:
    """A thin layer on top of tables of two columns: (id, value)"""

    def __init__(self, table, id_name, value_name):
        self.table = table
        self.value_name = value_name
        self._select_ids = make_select_by(
            getattr(self.table.c, value_name),
            getattr(self.table.c, id_name),
        )

    @nondata_property
    def conn(self):
        raise NotImplementedError

    def select_ids(self, values):
        return dict(self._select_ids(self.conn, values))

    def insert(self, values):
        insert_or_ignore(self.conn, self.table, [
            {self.value_name: value} for value in values
        ])
mit
Python
313f5c8c54002a736a323410c5d9ec96fcc2f50b
Create RespostaVer.py
AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb
backend/Models/Predio/RespostaVer.py
backend/Models/Predio/RespostaVer.py
from Framework.Resposta import Resposta
from Models.Campus.Campus import Campus as ModelCampus


class RespostaVer(Resposta):

    def __init__(self, campus):
        self.corpo = ModelCampus(campus)
mit
Python
95f5b7cd2325a61f537bffb783e950b30c97da5f
Add a demo about learning the shape parameter of gamma dist
bayespy/bayespy,fivejjs/bayespy,jluttine/bayespy,SalemAmeen/bayespy
bayespy/demos/gamma_shape.py
bayespy/demos/gamma_shape.py
from bayespy import nodes
from bayespy.inference import VB


def run():

    a = nodes.GammaShape(name='a')
    b = nodes.Gamma(1e-5, 1e-5, name='b')

    tau = nodes.Gamma(a, b, plates=(1000,), name='tau')
    tau.observe(nodes.Gamma(10, 20, plates=(1000,)).random())

    Q = VB(tau, a, b)

    Q.update(repeat=1000)

    print("True gamma parameters:", 10.0, 20.0)
    print("Estimated parameters from 1000 samples:", a.u[0], b.u[0])


if __name__ == "__main__":
    run()
mit
Python
3c618e8424e64a62168c2a2c683748d2496ef7cb
Add Urban Dictionary module.
billyvg/piebot
modules/urbandictionary.py
modules/urbandictionary.py
"""Looks up a term from urban dictionary @package ppbot @syntax ud <word> """ import requests import json from modules import * class Urbandictionary(Module): def __init__(self, *args, **kwargs): """Constructor""" Module.__init__(self, kwargs=kwargs) self.url = "http://www.urbandictionary.com/iphone/search/define?term=%s" def _register_events(self): """Register module commands.""" self.add_command('ud') def ud(self, event): """Action to react/respond to user calls.""" if self.num_args >= 1: word = '%20'.join(event['args']) r = requests.get(self.url % (word)) ur = json.loads(r.text) try: definition = ur['list'][0] message = "%(word)s (%(thumbs_up)d/%(thumbs_down)d): %(definition)s (ex: %(example)s)" % (definition) self.msg(event['target'], message) except KeyError: self.msg(event['target'], 'Could find word "%s"' % ' '.join(event['args'])) else: self.syntax_message(event['nick'], '.ud <word>')
mit
Python
d8fff759f2bff24f20cdbe98370ede9e5f3b7b13
Add 2D helmholtz convergence test
thomasgibson/firedrake-hybridization
convergence_tests/2D_helmholtz.py
convergence_tests/2D_helmholtz.py
from __future__ import absolute_import, division

from firedrake import *
import numpy as np


def helmholtz_mixed(x, V1, V2):
    # Create mesh and define function space
    mesh = UnitSquareMesh(2**x, 2**x)
    V1 = FunctionSpace(mesh, *V1, name="V")
    V2 = FunctionSpace(mesh, *V2, name="P")
    W = V1 * V2

    # Define variational problem
    lmbda = 1
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    f = Function(V2)

    f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
    a = (p*q - q*div(u) + lmbda*inner(v, u) + div(v)*p) * dx
    L = f*q*dx

    # Compute solution
    x = Function(W)

    params = {'mat_type': 'matfree',
              'ksp_type': 'preonly',
              'pc_type': 'python',
              'pc_python_type': 'firedrake.HybridizationPC',
              'hybridization': {'ksp_type': 'preonly',
                                'pc_type': 'lu',
                                'hdiv_residual': {'ksp_type': 'cg',
                                                  'ksp_rtol': 1e-14},
                                'use_reconstructor': True}}
    solve(a == L, x, solver_parameters=params)

    # Analytical solution
    f.interpolate(Expression("sin(x[0]*pi*2)*sin(x[1]*pi*2)"))

    u, p = x.split()
    err = sqrt(assemble(dot(p - f, p - f) * dx))
    return x, err


V1 = ('RT', 1)
V2 = ('DG', 0)

x, err = helmholtz_mixed(8, V1, V2)
print err
File("helmholtz_mixed.pvd").write(x.split()[0], x.split()[1])

l2errs = []
for i in range(1, 9):
    l2errs.append(helmholtz_mixed(i, V1, V2)[1])

l2errs = np.array(l2errs)
conv = np.log2(l2errs[:-1] / l2errs[1:])[-1]
print conv
mit
Python
5206a15d59bc8881629c48bb4136bb1a9cb7b4d0
Create ms_old_identifiers.py
Bindernews/TheHound
identifiers/ms_old_identifiers.py
identifiers/ms_old_identifiers.py
from identifier import *
import collections

CFBInfo = collections.namedtuple('CFBInfo', ['name', 'description', 'pattern'])

OFFICE_PATTERNS = [
    'D0 CF 11 E0 A1 B1 1A E1'
]

FILE_PATTERNS = [
    CFBInfo('DOC', 'Microsoft Word 97-2003', bytes.fromhex('EC A5 C1 20')),
    CFBInfo('XLS', 'Microsoft Excel 97-2003', bytes.fromhex('09 08 10 20 20 06 05 20 A6 45 CD 07')),
]


class CfbResolver:
    def identify(self, stream):
        data = stream.read(128)
        for filepat in FILE_PATTERNS:
            index = data.find(filepat.pattern)
            if index != -1:
                return Result(filepat.name, filepat.description)
        return Result('CFB')


def load(hound):
    hound.add_matches(OFFICE_PATTERNS, CfbResolver())
mit
Python
835a7b9bea1b006b5a096665d706b64b778d45ab
fix default param
FederatedAI/FATE,FederatedAI/FATE,FederatedAI/FATE
python/federatedml/ensemble/test/hack_encrypter.py
python/federatedml/ensemble/test/hack_encrypter.py
class HackDecrypter():

    def encrypt(self, val):
        return val

    def decrypt(self, val):
        return val
apache-2.0
Python
aeb671484bc8e68a8aba3eaa80523ae153b8e9c9
Add files via upload
fumen/kihon,fumen/kihon,fumen/kihon
youtube_list.py
youtube_list.py
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import pafy

DEVELOPER_KEY = "AIzaSyCsrKjMf7_mHYrT6rIJ-oaA6KL5IYg389A"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"


def youtube_search(options):
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)

    # Call the search.list method to retrieve results matching the specified
    # query term.
    search_response = youtube.search().list(
        q="Never Give",
        part="id,snippet"
    ).execute()

    videos = []
    channels = []
    playlists = []

    # Add each result to the appropriate list, and then display the lists of
    # matching videos, channels, and playlists.
    for search_result in search_response.get("items", []):
        if search_result["id"]["kind"] == "youtube#video":
            videos.append("%s" % (search_result["id"]["videoId"]))

    print videos[0], "\n"

    # Pafy audio stream URL
    audio = pafy.new(videos[0])
    print audio.audiostreams[0].url


if __name__ == "__main__":
    argparser.add_argument("--q", help="Search term", default="Google")
    argparser.add_argument("--max-results", help="Max results", default=25)
    args = argparser.parse_args()

    try:
        youtube_search(args)
    except HttpError, e:
        print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
mit
Python
0770fab7c4985704e2793ab98150c9f1a2729e01
Create easy_17_ArrayAdditionI.py
GabrielGhe/CoderbyteChallenges,GabrielGhe/CoderbyteChallenges
easy_17_ArrayAdditionI.py
easy_17_ArrayAdditionI.py
import itertools

#################################################
#  This function will see if there is any       #
#  possible combination of the numbers in       #
#  the array that will give the largest number  #
#################################################


def ArrayAdditionI(arr):
    # sort, remove last element
    result = "false"
    arr.sort()
    large = arr[-1]
    arr = arr[:-1]

    # go through every combination and see if sum = large
    for x in range(2, len(arr) + 1):
        for comb in itertools.combinations(arr, x):
            if large == sum(comb):
                result = "true"
                break

    return result

print ArrayAdditionI(raw_input())
mit
Python
1cc15f3ae9a0b7fa5b2dae4bcdd9f0f3c061ce4d
Fix relate_name on Bug model
mozilla/reclama,mozilla/reclama,mozilla/reclama,mozilla/reclama
reclama/sprints/migrations/0002_auto_20150130_1751.py
reclama/sprints/migrations/0002_auto_20150130_1751.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('sprints', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bug',
            name='event',
            field=models.ManyToManyField(related_name='bugs', to='sprints.Event'),
            preserve_default=True,
        ),
    ]
mpl-2.0
Python
de65724abf0a01660e413189d1738a72d5afd297
add simple test for create_wininst.
cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento
bento/commands/tests/test_wininst.py
bento/commands/tests/test_wininst.py
import os
import shutil
import tempfile
import zipfile

import os.path as op

import mock

import bento.commands.build_wininst

from bento.commands.build_wininst \
    import \
        create_wininst
from bento.compat.api.moves \
    import \
        unittest
from bento.core.node \
    import \
        create_base_nodes
from bento.installed_package_description \
    import \
        InstalledPkgDescription


class TestWininstInfo(unittest.TestCase):
    def setUp(self):
        self.old_dir = None
        self.tmpdir = None

        self.old_dir = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        try:
            self.top_node, self.build_node, self.run_node = \
                create_base_nodes(self.tmpdir, op.join(self.tmpdir, "build"))
            os.chdir(self.tmpdir)
        except:
            shutil.rmtree(self.tmpdir)
            raise

    def tearDown(self):
        os.chdir(self.old_dir)
        shutil.rmtree(self.tmpdir)

    @mock.patch("bento.commands.build_wininst.create_exe", mock.MagicMock())
    def test_simple(self):
        """This just tests whether create_wininst runs at all and produces a
        zip-file."""
        ipackage = InstalledPkgDescription({}, {"name": "foo", "version": "1.0"}, {})
        create_wininst(ipackage, self.build_node, self.build_node,
                       wininst="foo.exe", output_dir="dist")

        arcname = bento.commands.build_wininst.create_exe.call_args[0][1]
        fp = zipfile.ZipFile(arcname)
        try:
            fp.namelist()
        finally:
            fp.close()
bsd-3-clause
Python
edfd6ddf8e7af41a8b5ed228360b92377bfc8964
add 167. First 200 problems have been finished!
zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler
vol4/167.py
vol4/167.py
import time


def ulam(a, b):
    yield a
    yield b
    u = [a, b]
    even_element = 0
    while even_element == 0 or u[-1] < 2 * even_element:
        sums = {}
        for i in range(len(u)):
            for j in range(i + 1, len(u)):
                sums[u[i] + u[j]] = sums.get(u[i] + u[j], 0) + 1
        u.append(min(k for k, v in sums.iteritems() if v == 1 and k > u[-1]))
        yield u[-1]
        if u[-1] % 2 == 0:
            even_element = u[-1]
    index = 0
    while even_element + u[index] <= u[-1]:
        index += 1
    while True:
        if even_element + u[index] > u[-1] + 2:
            u.append(u[-1] + 2)
        else:
            u.append(even_element + u[index + 1])
            index = index + 2
        yield u[-1]


if __name__ == "__main__":
    N = 10 ** 11
    ans = 0
    periods = [32, 26, 444, 1628, 5906, 80, 126960, 380882, 2097152]
    diffs = [126, 126, 1778, 6510, 23622, 510, 507842, 1523526, 8388606]
    for n in range(2, 11):
        u = ulam(2, 2 * n + 1)
        index = 0
        even = False
        while not even or (N - index) % periods[n - 2] != 0:
            num = u.next()
            if num % 2 == 0 and num > 2:
                even = True
            index += 1
        ans += num + (N - index) / periods[n - 2] * diffs[n - 2]
    print ans
mit
Python
e4d222c4e1b05f8d34b2236d05269827c345b0c7
Handle also running rebot
moto-timo/robotframework,yahman72/robotframework,stasiek/robotframework,suvarnaraju/robotframework,alexandrul-ci/robotframework,joongh/robotframework,nmrao/robotframework,JackNokia/robotframework,ashishdeshpande/robotframework,rwarren14/robotframework,fingeronthebutton/robotframework,un33k/robotframework,Colorfulstan/robotframework,kyle1986/robortframe,ashishdeshpande/robotframework,userzimmermann/robotframework,jaloren/robotframework,snyderr/robotframework,stasiek/robotframework,fingeronthebutton/robotframework,wojciechtanski/robotframework,robotframework/robotframework,dkentw/robotframework,SivagnanamCiena/robotframework,alexandrul-ci/robotframework,suvarnaraju/robotframework,snyderr/robotframework,xiaokeng/robotframework,JackNokia/robotframework,nmrao/robotframework,SivagnanamCiena/robotframework,synsun/robotframework,joongh/robotframework,suvarnaraju/robotframework,HelioGuilherme66/robotframework,kyle1986/robortframe,ChrisHirsch/robotframework,snyderr/robotframework,dkentw/robotframework,snyderr/robotframework,dkentw/robotframework,nmrao/robotframework,un33k/robotframework,rwarren14/robotframework,Colorfulstan/robotframework,jaloren/robotframework,ashishdeshpande/robotframework,nmrao/robotframework,ChrisHirsch/robotframework,un33k/robotframework,wojciechtanski/robotframework,jorik041/robotframework,yahman72/robotframework,synsun/robotframework,rwarren14/robotframework,kurtdawg24/robotframework,edbrannin/robotframework,yonglehou/robotframework,ashishdeshpande/robotframework,yonglehou/robotframework,edbrannin/robotframework,yonglehou/robotframework,yahman72/robotframework,ChrisHirsch/robotframework,edbrannin/robotframework,robotframework/robotframework,yonglehou/robotframework,kurtdawg24/robotframework,HelioGuilherme66/robotframework,un33k/robotframework,alexandrul-ci/robotframework,kyle1986/robortframe,stasiek/robotframework,userzimmermann/robotframework,moto-timo/robotframework,jorik041/robotframework,suvarnaraju/robotframework,joongh/robotframework,xiaokeng/robotframework,fingeronthebutton/robotframework,synsun/robotframework,kurtdawg24/robotframework,stasiek/robotframework,rwarren14/robotframework,SivagnanamCiena/robotframework,snyderr/robotframework,edbrannin/robotframework,SivagnanamCiena/robotframework,stasiek/robotframework,Colorfulstan/robotframework,fingeronthebutton/robotframework,SivagnanamCiena/robotframework,jaloren/robotframework,moto-timo/robotframework,jorik041/robotframework,ChrisHirsch/robotframework,yahman72/robotframework,jorik041/robotframework,wojciechtanski/robotframework,kyle1986/robortframe,Colorfulstan/robotframework,userzimmermann/robotframework,yahman72/robotframework,HelioGuilherme66/robotframework,moto-timo/robotframework,xiaokeng/robotframework,xiaokeng/robotframework,dkentw/robotframework,Colorfulstan/robotframework,joongh/robotframework,ashishdeshpande/robotframework,kurtdawg24/robotframework,userzimmermann/robotframework,un33k/robotframework,rwarren14/robotframework,jaloren/robotframework,wojciechtanski/robotframework,JackNokia/robotframework,eric-stanley/robotframework,robotframework/robotframework,dkentw/robotframework,jorik041/robotframework,userzimmermann/robotframework,ChrisHirsch/robotframework,JackNokia/robotframework,moto-timo/robotframework,synsun/robotframework,fingeronthebutton/robotframework,kyle1986/robortframe,synsun/robotframework,suvarnaraju/robotframework,xiaokeng/robotframework,eric-stanley/robotframework,eric-stanley/robotframework,wojciechtanski/robotframework,yonglehou/robotframework,joongh/robotframework,eric-stanley/robotframework,
kurtdawg24/robotframework,nmrao/robotframework,JackNokia/robotframework,jaloren/robotframework,alexandrul-ci/robotframework,alexandrul-ci/robotframework,edbrannin/robotframework
src/robot/jarrunner.py
src/robot/jarrunner.py
# Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from org.robotframework import RobotRunner

from robot import runner, run_from_cli, rebot, rebot_from_cli


class JarRunner(RobotRunner):
    """Used for Java-Jython interop when RF is executed from .jar file"""

    def run(self, args):
        print rebot, rebot.__file__
        try:
            if args and args[0] == 'rebot':
                print rebot.__doc__
                rebot_from_cli(args[1:], rebot.__doc__)
            else:
                run_from_cli(args, runner.__doc__)
        except SystemExit, err:
            return err.code
# Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from org.robotframework import RobotRunner

from robot import runner, run_from_cli


class JarRunner(RobotRunner):
    """Used for Java-Jython interop when RF is executed from .jar file"""

    def run(self, args):
        try:
            run_from_cli(args, runner.__doc__)
        except SystemExit, err:
            return err.code
apache-2.0
Python
76399574b7fb914d1baa2719a0e493d4b22bb730
Create PedidoEditar.py
AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb
backend/Models/Grau/PedidoEditar.py
backend/Models/Grau/PedidoEditar.py
from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP


class PedidoEditar(Pedido):

    def __init__(self, variaveis_do_ambiente):
        super(PedidoEditar, self).__init__(variaveis_do_ambiente)
        try:
            self.nome = self.corpo['nome']
        except:
            raise ErroNoHTTP(400)

    def getNome(self):
        return self.nome
mit
Python
df146818d004e65102cc6647373b0fddb0d383fd
add basic integration tests
objectified/vdist,objectified/vdist
integration-tests/test_builder.py
integration-tests/test_builder.py
import os
import subprocess
import tempfile

from vdist.builder import Builder
from vdist.source import git, git_directory, directory


def test_generate_deb_from_git():
    builder = Builder()
    builder.add_build(
        app='vdist-test-generate-deb-from-git',
        version='1.0',
        source=git(
            uri='https://github.com/objectified/vdist',
            branch='master'
        ),
        profile='ubuntu-trusty'
    )
    builder.build()

    cwd = os.getcwd()
    target_file = os.path.join(
        cwd, 'dist',
        'vdist-test-generate-deb-from-git-1.0-ubuntu-trusty',
        'vdist-test-generate-deb-from-git_1.0_amd64.deb'
    )
    assert os.path.isfile(target_file)
    assert os.path.getsize(target_file) > 0


def test_generate_deb_from_git_directory():
    tempdir = tempfile.gettempdir()
    checkout_dir = os.path.join(tempdir, 'vdist')

    git_p = subprocess.Popen(
        ['git', 'clone', 'https://github.com/objectified/vdist', checkout_dir])
    git_p.communicate()

    builder = Builder()
    builder.add_build(
        app='vdist-test-generate-deb-from-git-dir',
        version='1.0',
        source=git_directory(
            path=checkout_dir,
            branch='master'
        ),
        profile='ubuntu-trusty'
    )
    builder.build()

    cwd = os.getcwd()
    target_file = os.path.join(
        cwd, 'dist',
        'vdist-test-generate-deb-from-git-dir-1.0-ubuntu-trusty',
        'vdist-test-generate-deb-from-git-dir_1.0_amd64.deb'
    )
    assert os.path.isfile(target_file)
    assert os.path.getsize(target_file) > 0


def test_generate_deb_from_directory():
    tempdir = tempfile.gettempdir()
    checkout_dir = os.path.join(tempdir, 'vdist')

    git_p = subprocess.Popen(
        ['git', 'clone', 'https://github.com/objectified/vdist', checkout_dir])
    git_p.communicate()

    builder = Builder()
    builder.add_build(
        app='vdist-test-generate-deb-from-dir',
        version='1.0',
        source=directory(
            path=checkout_dir,
        ),
        profile='ubuntu-trusty'
    )
    builder.build()

    cwd = os.getcwd()
    target_file = os.path.join(
        cwd, 'dist',
        'vdist-test-generate-deb-from-dir-1.0-ubuntu-trusty',
        'vdist-test-generate-deb-from-dir_1.0_amd64.deb'
    )
    assert os.path.isfile(target_file)
    assert os.path.getsize(target_file) > 0
mit
Python
dc0ecffd6c4115019cfcbcc13b17a20511888c9b
Add ut for fused ops
chengduoZH/Paddle,baidu/Paddle,tensor-tang/Paddle,chengduoZH/Paddle,baidu/Paddle,baidu/Paddle,luotao1/Paddle,baidu/Paddle,luotao1/Paddle,baidu/Paddle,luotao1/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,chengduoZH/Paddle,PaddlePaddle/Paddle,tensor-tang/Paddle,luotao1/Paddle,chengduoZH/Paddle,PaddlePaddle/Paddle,tensor-tang/Paddle,PaddlePaddle/Paddle,chengduoZH/Paddle,PaddlePaddle/Paddle,tensor-tang/Paddle,tensor-tang/Paddle,luotao1/Paddle
python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py
python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
import paddle.compat as cpt


class TestFusedEmbeddingSeqPoolOp(OpTest):
    def setUp(self):
        self.op_type = "fused_embedding_seq_pool"
        self.emb_size = 2
        table = np.random.random((17, self.emb_size)).astype("float32")
        ids = np.array([[[4], [3]], [[4], [3]], [[2], [1]],
                        [[16], [1]]]).astype("int64")
        merged_ids = np.array([4, 2, 16]).astype("int64")
        ids_expand = np.expand_dims(ids, axis=1)
        self.lod = [[3, 1]]
        self.attrs = {'is_sparse': True}
        self.inputs = {'W': table, 'Ids': (ids_expand, self.lod)}
        self.outputs = {
            'Out': np.reshape(
                np.array([
                    table[[4, 3]] + table[[4, 3]] + table[[2, 1]],
                    table[[16, 1]]
                ]), [len(self.lod[0]), 2 * self.emb_size])
        }

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
apache-2.0
Python
a852de81afdf8426cb243115a87856e2767a8d40
Add construct test for known bad inplace string operations.
kayhayen/Nuitka,tempbottle/Nuitka,tempbottle/Nuitka,tempbottle/Nuitka,tempbottle/Nuitka,kayhayen/Nuitka,kayhayen/Nuitka,wfxiang08/Nuitka,wfxiang08/Nuitka,wfxiang08/Nuitka,kayhayen/Nuitka,wfxiang08/Nuitka
tests/benchmarks/constructs/InplaceOperationStringAdd.py
tests/benchmarks/constructs/InplaceOperationStringAdd.py
#     Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Python test originally created or extracted from other peoples work. The
#     parts from me are licensed as below. It is at least Free Software where
#     it's copied from other people. In these cases, that will normally be
#     indicated.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
module_value1 = 5
module_value2 = 3

def calledRepeatedly():
    # Force frame and eliminate forward propagation (currently).
    module_value1

    # Make sure we have a local variable x anyway
    s = "2"

    additiv = "*" * 1000

    local_value = module_value1

    for x in range(local_value, local_value+15):
        # construct_begin
        s += additiv
        # construct_end
        pass

for x in xrange(50000):
    calledRepeatedly()

print("OK.")
apache-2.0
Python
60880e780d611f32a3358bcae76f4eed22feb2d7
Create a module to add "Basic string search algorithms"
fleith/coding,fleith/coding,fleith/coding
string_search.py
string_search.py
'''String search algorithms'''

_naive_iterations = 0


def naive_search(string, pattern):
    '''Naïve string search algorithm

    Pseudo code:
        string[1..n] and pattern[1..m]
        for i from 1 to n-m+1
            for j from 1 to m
                if s[i+j-1] ≠ pattern[j]
                    jump to next iteration of outer loop
            return i
        return not found

    :param string: string
    :param pattern: pattern to search
    :return: position in the string where the pattern start or None
    :reference: https://en.wikipedia.org/wiki/Rabin–Karp_algorithm
    '''
    for i in range(0, len(string) - len(pattern) + 1):
        global _naive_iterations
        _naive_iterations += 1

        def search():
            for j in range(0, len(pattern)):
                global _naive_iterations
                _naive_iterations += 1
                if string[i+j] != pattern[j]:
                    return None
            return i

        value = search()
        if value != None:
            return value
    return None


def test_naive_search():
    '''Naïve string search algorithm test using Py.Test'''
    global _naive_iterations
    _naive_iterations = 0
    assert naive_search("abc", "wowxyzabcnice") == None
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("abc", "wowxyzacbnice") == None
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("wowxyzabcnice", "abc") == 6
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("wowxyzacbnice", "abc") == None
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("wowxyzniceabc", "abc") == 10
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("abcwowxyznice", "abc") == 0
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("abc", "abc") == 0
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("abc", "") == 0
    print(_naive_iterations)
    _naive_iterations = 0
    assert naive_search("", "") == 0
    print(_naive_iterations)
    _naive_iterations = 0


def rabin_karp(string, pattern):
    '''Rabin–Karp string search algorithm

    Pseudo Code:
        function RabinKarp(string s[1..n], string pattern[1..m])
            hpattern := hash(pattern[1..m]);
            for i from 1 to n-m+1
                hs := hash(s[i..i+m-1])
                if hs = hpattern
                    if s[i..i+m-1] = pattern[1..m]
                        return i
            return not found
    '''
    hash_pattern = hash(pattern)
    for i in range(0, len(string) - len(pattern) + 1):
        global _rabin_iterations
        _rabin_iterations += 1
        hash_string = hash(string[i:i+len(pattern)])
        if hash_string == hash_pattern:
            if string[i:i+len(pattern)] == pattern[:len(pattern)]:
                return i
    return None

_rabin_iterations = 0


def test_rabin_karp():
    '''Rabin–Karp string search algorithm test using Py.Test'''
    print("-------------")
    global _rabin_iterations
    assert rabin_karp("abc", "wowxyzabcnice") == None
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("abc", "wowxyzacbnice") == None
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("wowxyzabcnice", "abc") == 6
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("wowxyzacbnice", "abc") == None
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("wowxyzniceabc", "abc") == 10
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("abcwowxyznice", "abc") == 0
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("abc", "abc") == 0
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("abc", "") == 0
    print(_rabin_iterations)
    _rabin_iterations = 0
    assert rabin_karp("", "") == 0
    print(_rabin_iterations)
    _rabin_iterations = 0
unlicense
Python
28df83848a04e45059f4c672fde53f4f84dbd28d
Add module module_pubivisat.py
hannupekka/pyfibot-modules
module_pubivisat.py
module_pubivisat.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib2
from bs4 import BeautifulSoup


def command_pubivisat(bot, user, channel, args):
    """Fetches today's pub quizzes for Tampere from pubivisat.fi"""

    url = "http://pubivisat.fi/tampere"

    f = urllib2.urlopen(url)
    d = f.read()
    f.close()

    bs = BeautifulSoup(d)
    data = bs.find('table', {'class': 'quiz_list'}).find('tbody').findAll('tr')

    quizzes = []
    for row in data:
        name = row.find('a').string
        time = row.findAll('td')[1].string
        quizzes.append("%s: %s" % (str(name), str(time)))

    output = ' | '.join(reversed(quizzes))
    return(bot.say(channel, output))
apache-2.0
Python
e5736370568adab1334f653c44dd060c06093fae
add basic twisted soap server.
martijnvermaat/rpclib,martijnvermaat/rpclib,martijnvermaat/rpclib,arskom/spyne,arskom/spyne,arskom/spyne
src/rpclib/test/interop/server/soap_http_basic_twisted.py
src/rpclib/test/interop/server/soap_http_basic_twisted.py
#!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#

import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('rpclib.wsgi')
logger.setLevel(logging.DEBUG)

import os

from rpclib.test.interop.server.soap_http_basic import soap_application
from rpclib.server.twisted_ import TwistedWebApplication

host = '127.0.0.1'
port = 9753


def main(argv):
    from twisted.python import log
    from twisted.web.server import Site
    from twisted.web.static import File
    from twisted.internet import reactor
    from twisted.python import log

    observer = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(observer.emit, setStdout=False)

    wr = TwistedWebApplication(soap_application)
    site = Site(wr)

    reactor.listenTCP(port, site)
    logging.info("listening on: %s:%d" % (host, port))

    return reactor.run()


if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
lgpl-2.1
Python
e83edea432f16ed6a2c9edcaa6da70c928d75eb5
Create module containing constants
khalim19/gimp-plugin-export-layers,khalim19/gimp-plugin-export-layers
export_layers/pygimplib/constants.py
export_layers/pygimplib/constants.py
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <khalim19@gmail.com>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#

"""
This module contains constants used in other modules.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

str = unicode

#===============================================================================

_LOG_OUTPUT_MODES = (LOG_EXCEPTIONS_ONLY, LOG_OUTPUT_FILES, LOG_OUTPUT_GIMP_CONSOLE) = (0, 1, 2)
bsd-3-clause
Python
0421adb2eb391c57d02dfa0b1b14e3c620c53dfc
Create tarea7.py
JOSUEXLION/prog3-uip,JOSUEXLION/prog3-uip
tareas/tarea7.py
tareas/tarea7.py
#josue de leon
# Tarea 7
#8-876-2357

'''1. Create a Kivy application that manages an attendance register. The
application must basically contain a label that says "Nombre: ", a field for
entering text strings, a button that says "Guardar" (save) and another button
that says "Exportar" (export). The save button appends the contents of the
field to an attendance list. The export button saves the attendance list to a
file with a TXT extension'''

import listasutils as lu

from kivy.app import App
from kivy.config import Config
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty

Config.set('graphics', 'resizable', '0')
Config.set('graphics', 'width', '640')
Config.set('graphics', 'height', '480')


class Fondo(FloatLayout):
    lista = []
    listgrid = ObjectProperty(None)
    textbox = ObjectProperty(None)

    def OnGuardarClick(self, texto):
        if texto != "":
            grid = self.listgrid
            grid.bind(minimum_height=grid.setter('height'),
                      minimum_width=grid.setter('width'))
            self.textbox.text = ''
            self.lista.append(texto)
            RowNombre = Label(text='{0}'.format(texto))
            grid.add_widget(RowNombre)

    def OnExportarClick(self):
        lu.salvarLista("Tarea7.txt", self.lista)


class AsistenciaApp(App):
    def build(self):
        return Fondo()


if __name__ == '__main__':
    AsistenciaApp().run()


<Fondo>:
    scroll_view: scrollviewID
    listgrid: gridlayoutID
    textbox: textboxID

    BoxLayout:
        orientation: 'vertical'
        size_hint: 1, 0.1
        pos_hint: {'top': 1}
        BoxLayout:
            orientation: 'horizontal'
            Label:
                text: 'Nombre'
            TextInput:
                id: textboxID
                multiline: False

    BoxLayout:
        orientation: 'vertical'
        size_hint: 1, 0.8
        pos_hint: {'top': 0.9}
        canvas:
            Color:
                rgba: (0.2, 0.2, 0.2, 1)
            Rectangle:
                pos: self.pos
                size: self.size
        ScrollView:
            id: scrollviewID
            orientation: 'vertical'
            pos_hint: {'x': 0, 'y': 0}
            bar_width: '20dp'
            GridLayout:
                id: gridlayoutID
                cols: 1
                size_hint: 1, None
                row_default_height: 40
                row_force_default: False
                BoxLayout:
                    canvas:
                        Color:
                            rgba: (0.4, 0.4, 0.4, 1)
                        Rectangle:
                            pos: self.pos
                            size: self.size
                    Label:
                        text: 'Nombre'

    BoxLayout:
        orientation: 'vertical'
        size_hint: 1, 0.1
        pos_hint: {'top': 0.1}
        BoxLayout:
            orientation: 'horizontal'
            Button:
                text: 'Guardar'
                on_release: root.OnGuardarClick(textboxID.text)
            Button:
                text: 'Exportar'
                on_release: root.OnExportarClick()
mit
Python
b260040bc3ca48b4e76d73c6efe60b964fa5c108
Add test of removing unreachable terminals
PatrikValkovic/grammpy
tests/UnreachableSymbolsRemove/RemovingTerminalsTest.py
tests/UnreachableSymbolsRemove/RemovingTerminalsTest.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:23
:Licence GNUv3
Part of grammpy-transforms
"""

from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class E(Nonterminal): pass
class F(Nonterminal): pass


class RuleAto0B(Rule):
    rule = ([A], [0, B])


class RuleBto1C(Rule):
    rule = ([B], [1, C])


class RuleCto2C(Rule):
    rule = ([C], [2, C])


class RemovingTerminalsTest(TestCase):
    def test_removingTerminals(self):
        g = Grammar(terminals=[0, 1, 2, 3],
                    nonterminals=[A, B, C, D, E, F],
                    rules=[RuleAto0B, RuleBto1C, RuleCto2C],
                    start_symbol=A)
        com = ContextFree.remove_unreachable_symbols(g)
        self.assertTrue(com.have_term([0, 1, 2]))
        self.assertFalse(com.have_term(3))
        self.assertTrue(com.have_nonterm([A, B, C]))
        self.assertFalse(com.have_nonterm(D))
        self.assertFalse(com.have_nonterm(E))
        self.assertFalse(com.have_nonterm(F))

    def test_removingTerminalsShouldNotChange(self):
        g = Grammar(terminals=[0, 1, 2, 3],
                    nonterminals=[A, B, C, D, E, F],
                    rules=[RuleAto0B, RuleBto1C, RuleCto2C],
                    start_symbol=A)
        ContextFree.remove_unreachable_symbols(g)
        self.assertTrue(g.have_term([0, 1, 2, 3]))
        self.assertTrue(g.have_nonterm([A, B, C, D, E, F]))

    def test_removingTerminalsShouldChange(self):
        g = Grammar(terminals=[0, 1, 2, 3],
                    nonterminals=[A, B, C, D, E, F],
                    rules=[RuleAto0B, RuleBto1C, RuleCto2C],
                    start_symbol=A)
        ContextFree.remove_unreachable_symbols(g, transform_grammar=True)
        self.assertTrue(g.have_term([0, 1, 2]))
        self.assertFalse(g.have_term(3))
        self.assertTrue(g.have_nonterm([A, B, C]))
        self.assertFalse(g.have_nonterm(D))
        self.assertFalse(g.have_nonterm(E))
        self.assertFalse(g.have_nonterm(F))


if __name__ == '__main__':
    main()
mit
Python
c82473efdeb7b1713f44370de761ec9022d02b5e
Add management command to fill and clear cache
akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr
akvo/rsr/management/commands/populate_project_directory_cache.py
akvo/rsr/management/commands/populate_project_directory_cache.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.

"""Populate the project directory cache for all the projects.

Usage:

    python manage.py populate_project_directory_cache

"""

from django.core.management.base import BaseCommand

from akvo.rest.views.project import serialized_project
from akvo.rest.cache import delete_project_from_project_directory_cache
from akvo.rsr.models import Project


class Command(BaseCommand):
    help = __doc__

    def add_arguments(self, parser):
        parser.add_argument('action', choices=['clear', 'fill'], help='Action to perform')

    def handle(self, *args, **options):
        projects = Project.objects.public().published().values_list('pk', flat=True)
        if options['action'] == 'clear':
            for project_id in projects:
                delete_project_from_project_directory_cache(project_id)
        else:
            for project_id in projects:
                serialized_project(project_id)
agpl-3.0
Python
4e6f2ede0a8a9291befe262cbec77d3e7cd873b0
add new package (#26514)
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/py-rsatoolbox/package.py
var/spack/repos/builtin/packages/py-rsatoolbox/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyRsatoolbox(PythonPackage):
    """Representational Similarity Analysis (RSA) in Python."""

    homepage = "https://github.com/rsagroup/rsatoolbox"
    pypi = "rsatoolbox/rsatoolbox-0.0.3.tar.gz"

    version('0.0.3', sha256='9bf6e16d9feadc081f9daaaaab7ef38fc1cd64dd8ef0ccd9f74adb5fe6166649')

    depends_on('py-setuptools', type='build')
    depends_on('py-coverage', type=('build', 'run'))
    depends_on('py-numpy@1.21.2:', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
    depends_on('py-scikit-learn', type=('build', 'run'))
    depends_on('py-scikit-image', type=('build', 'run'))
    depends_on('py-tqdm', type=('build', 'run'))
    depends_on('py-h5py', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-joblib', type=('build', 'run'))

    def patch(self):
        # tests look for a requirements.txt file that does not exist
        with working_dir('tests'):
            open('requirements.txt', 'a').close()
lgpl-2.1
Python
0ac0c81a3427f35447f52c1643229f5dbe607002
Add a merge migration and bring up to date
mfraezz/osf.io,erinspace/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,adlius/osf.io,erinspace/osf.io,cslzchen/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,icereval/osf.io,mfraezz/osf.io,icereval/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,icereval/osf.io,saradbowman/osf.io,adlius/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,cslzchen/osf.io,felliott/osf.io,mfraezz/osf.io,felliott/osf.io,adlius/osf.io,aaxelb/osf.io,cslzchen/osf.io,pattisdr/osf.io,sloria/osf.io,brianjgeiger/osf.io,felliott/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,baylee-d/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,felliott/osf.io,pattisdr/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,brianjgeiger/osf.io,erinspace/osf.io,mfraezz/osf.io,cslzchen/osf.io,caseyrollins/osf.io,pattisdr/osf.io,sloria/osf.io,sloria/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,mattclark/osf.io
osf/migrations/0099_merge_20180426_0930.py
osf/migrations/0099_merge_20180426_0930.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-26 14:30
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0098_merge_20180416_1807'),
        ('osf', '0096_add_provider_doi_prefixes'),
    ]

    operations = [
    ]
apache-2.0
Python
2731aba68f86c0adcb26f4105c7418ffa35e3d09
add first auto-test
JonathonReinhart/bgrep,JonathonReinhart/bgrep
test/run_test.py
test/run_test.py
#!/usr/bin/env python
import sys
import os
from subprocess import Popen, PIPE
import re


class TestFailure(Exception):
    pass


def do_bgrep(pattern, paths, options=[], retcode=0):
    bgrep_path = '../bgrep'

    args = [bgrep_path]
    args += list(options)
    args.append(pattern.encode('hex'))
    args += list(paths)

    p = Popen(args, stdout=PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != retcode:
        raise TestFailure('Return code: {0}, expected: {1}'.format(p.returncode, retcode))

    pat = re.compile('^(.*):(0x[0-9A-Fa-f]+).*')

    result = {}
    for line in stdout.splitlines():
        m = pat.match(line)
        if not m:
            continue
        filename = m.group(1)
        offset = int(m.group(2), 16)

        if not filename in result:
            result[filename] = []
        result[filename].append(offset)

    return result


def assert_equal(expected, actual):
    if not expected == actual:
        raise TestFailure('Expected: {0}, Actual {1}'.format(expected, actual))


def single_test(data, pattern, offsets):
    filename = 'test.bin'
    with open(filename, 'wb') as f:
        f.write(data)

    try:
        for retfilename, retoffsets in do_bgrep(pattern, [filename]).iteritems():
            assert_equal(filename, retfilename)
            assert_equal(set(offsets), set(retoffsets))
    finally:
        os.remove(filename)


def test1():
    n = 100
    pattern = '\x12\x34\x56\x78'
    data = '\0'*n + pattern + '\0'*n
    offsets = [n]
    single_test(data, pattern, offsets)


all_tests = [
    test1,
]


def main():
    for t in all_tests:
        name = t.__name__
        print '{0}: Starting'.format(name)
        try:
            t()
        except TestFailure as tf:
            print '{0}: Failure: {1}'.format(name, tf)
        else:
            print '{0}: Success'.format(name)


if __name__ == '__main__':
    main()
apache-2.0
Python
d6315d28ed55b76f3caa3fff26141815f7da7dec
add migration
masschallenge/django-accelerator,masschallenge/django-accelerator
accelerator/migrations/0027_modify_video_url_help_text.py
accelerator/migrations/0027_modify_video_url_help_text.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-12-05 16:27
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import embed_video.fields


class Migration(migrations.Migration):

    dependencies = [
        ('accelerator', '0026_startup_acknowledgement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='startup',
            name='additional_industries',
            field=models.ManyToManyField(
                blank=True,
                db_table='accelerator_startup_related_industry',
                help_text='You may select up to 5 related industries.',
                related_name='secondary_startups',
                to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL,
                verbose_name='Additional Industries'),
        ),
        migrations.AlterField(
            model_name='startup',
            name='video_elevator_pitch_url',
            field=embed_video.fields.EmbedVideoField(
                blank=True,
                help_text=(
                    'Upload your 1-3 minute video pitch to Vimeo or '
                    'Youtube. Paste the shared link here.'),
                max_length=100),
        ),
    ]
mit
Python
d573d33cc37ad666d3a4f47a5ac9dfec5a9b5fc5
add app config
imndszy/voluntary,StarInworld/voluntary,StarInworld/voluntary,StarInworld/voluntary,imndszy/voluntary,imndszy/voluntary
app/appconfig.py
app/appconfig.py
# -*- coding:utf8 -*-
# Author: shizhenyu96@gamil.com
# github: https://github.com/imndszy

HOST = "https://www.njuszy.cn/"
mit
Python
592145cf644262a21d9f5ac8850c1d59eeac83fe
bring over from other repo
toomanycats/IndeedScraper,toomanycats/IndeedScraper,toomanycats/IndeedScraper,toomanycats/IndeedScraper
GrammarParser.py
GrammarParser.py
''' https://gist.github.com/alexbowe/879414#file-nltk-intro-py-L34'''

from nltk.corpus import stopwords
import nltk

stopwords = stopwords.words('english')
lemmatizer = nltk.WordNetLemmatizer()
stemmer_alt = nltk.stem.porter.PorterStemmer()

# Used when tokenizing words
sentence_re = r'''(?x)              # set flag to allow verbose regexps
      ([A-Z])(\.[A-Z])+\.?          # abbreviations, e.g. U.S.A.
    | \w+(-\w+)*                    # words with optional internal hyphens
    | \$?\d+(\.\d+)?%?              # currency and percentages, e.g. $12.40, 82%
    | \.\.\.                        # ellipsis
    | [][.,;"'?():-_`]              # these are separate tokens
'''

# This grammar is from: S. N. Kim, T. Baldwin, and M.-Y. Kan.
# Evaluating n-gram based evaluation metrics for automatic keyphrase extraction.
# Technical report, University of Melbourne, Melbourne 2010.
grammar = r"""
    NBAR:
        {<NN.*|JJ>*<NN.*>}      # Nouns and Adjectives, terminated with Nouns

    NP:
        {<NBAR>}
        {<NBAR><IN><NBAR>}      # Above, connected with in/of/etc...
"""


class GrammarParser(object):
    """Fancier preprocessing of corpus using a grammar."""

    def leaves(self, tree):
        """Finds NP (nounphrase) leaf nodes of a chunk tree."""
        for subtree in tree.subtrees(filter=lambda t: t.label() == 'NP'):
            yield subtree.leaves()

    def normalise(self, word):
        """Normalises words to lowercase and stems and lemmatizes it."""
        word = word.lower()
        word = stemmer_alt.stem_word(word)
        word = lemmatizer.lemmatize(word)
        return word

    def acceptable_word(self, word):
        """Checks conditions for acceptable word: length, stopword."""
        accepted = bool(2 <= len(word) <= 20
                        and word.lower() not in stopwords)
        return accepted

    def get_terms(self, tree):
        """Filters the main tree and its subtrees for 'leaves',
        normalizes the words in the leaves and returns a generator."""
        for leaf in self.leaves(tree):
            term = [self.normalise(w) for w, _ in leaf if self.acceptable_word(w)]
            yield term

    def get_words(self, terms):
        """Loops over the terms and returns a single string of the words."""
        out = []
        for term in terms:
            for word in term:
                out.append(word)
        return " ".join(out)

    def main(self, text):
        """Breaks a single string into a tree using the grammar and
        returns the specified words as a string."""
        if text is None:
            return None

        chunker = nltk.RegexpParser(grammar)
        toks = nltk.regexp_tokenize(text, sentence_re)
        postoks = nltk.tag.pos_tag(toks)
        #print postoks
        tree = chunker.parse(postoks)
        terms = self.get_terms(tree)
        words = self.get_words(terms)
        return words
mit
Python
ffc32773953da2cf9e1d6e84aed1b53debc2c7c7
Create __init__.py
TylerKirby/cltk,kylepjohnson/cltk,D-K-E/cltk,diyclassics/cltk,cltk/cltk,LBenzahia/cltk,TylerKirby/cltk,LBenzahia/cltk
cltk/stem/middle_english/__init__.py
cltk/stem/middle_english/__init__.py
mit
Python
c7529927174b1626a0dc34f635b1d5939f565add
Add problem77.py
mjwestcott/projecteuler,mjwestcott/projecteuler,mjwestcott/projecteuler
euler_python/problem77.py
euler_python/problem77.py
""" problem77.py It is possible to write ten as the sum of primes in exactly five different ways: 7 + 3 5 + 5 5 + 3 + 2 3 + 3 + 2 + 2 2 + 2 + 2 + 2 + 2 What is the first value which can be written as the sum of primes in over five thousand different ways? """ from itertools import count, takewhile from toolset import get_primes, memoize_mutable @memoize_mutable def num_partitions(n, primes): # Using a slightly different algorithm than problem 76. # This one is adapted from SICP: https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-11.html # See the section entitled 'Example: Counting change'. Their logic is # more intuitive than that which I presented in the previous problem. if n < 0: return 0 elif n == 0: return 1 elif primes == []: return 0 else: return num_partitions(n, primes[1:]) + num_partitions(n - primes[0], primes) def problem77(): primes = list(takewhile(lambda x: x < 100, get_primes())) return next(filter(lambda x: num_partitions(x, primes) > 5000, count(2)))
mit
Python
11cbc92e292a54b219f8b5ec64ae8ab58577362d
add standalone davidson test w/ near-degeneracies
jjgoings/McMurchie-Davidson
tests/test021.py
tests/test021.py
import numpy as np
from mmd.utils.davidson import davidson


def test_davidson():
    np.random.seed(0)
    dim = 1000
    A = np.diag(np.arange(dim, dtype=np.float64))
    A[1:3, 1:3] = 0
    M = np.random.randn(dim, dim)
    M += M.T
    A += 1e-4 * M

    roots = 5
    E, C = davidson(A, roots)
    E_true, C_true = np.linalg.eigh(A)
    E_true, C_true = E_true[:roots], C_true[:, :roots]
    assert np.allclose(E, E_true)
bsd-3-clause
Python
781d43e48e83f00e4cd18e805efed7558b570adf
introduce btc select command
bittorrent/btc,kissiel/btc
btc/btc_select.py
btc/btc_select.py
import argparse
import fnmatch
import sys
import os
import re
from .btc import encoder, decoder, error, ordered_dict

_description = 'select some values'

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('keys', metavar='KEY', nargs='+', default=None,
                        help='keys associated with values to be selected')
    args = parser.parse_args()

    if sys.stdin.isatty():
        parser.error('no input, pipe another btc command output into this command')
    l = sys.stdin.read()
    if len(l.strip()) == 0:
        exit(1)

    try:
        l = decoder.decode(l)
    except ValueError:
        error('unexpected input: %s' % l)

    if not isinstance(l, list):
        error('input must be a list')
    elif not all(isinstance(x, dict) for x in l):
        error('list items must be dictionaries')

    out = []
    for i, e in enumerate(l):
        e_out = {}
        for key in args.keys:
            try:
                if len(args.keys) == 1:
                    e_out = e[key]
                else:
                    e_out[key] = e[key]
            except KeyError:
                error('key not found: {}'.format(key))
        out.append(e_out)

    if len(args.keys) > 1:
        print(encoder.encode([ordered_dict(d) for d in out]))
    else:
        print(encoder.encode([e for e in out]))

if __name__ == '__main__':
    main()
mit
Python
f947e6766c77f58a6cc1bd0d97758e43d6750c7f
add barycentric coordinates
compas-dev/compas
src/compas/geometry/interpolation/barycentric.py
src/compas/geometry/interpolation/barycentric.py
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from compas.geometry import subtract_vectors
from compas.geometry import dot_vectors


__all__ = [
    'barycentric_coordinates'
]


def barycentric_coordinates(point, triangle):
    """Compute the barycentric coordinates of a point with respect to a triangle.

    Parameters
    ----------
    point: list
        Point location.
    triangle: (point, point, point)
        A triangle defined by 3 points.

    Returns
    -------
    list
        The barycentric coordinates of the point.

    """
    a, b, c = triangle
    v0 = subtract_vectors(b, a)
    v1 = subtract_vectors(c, a)
    v2 = subtract_vectors(point, a)
    d00 = dot_vectors(v0, v0)
    d01 = dot_vectors(v0, v1)
    d11 = dot_vectors(v1, v1)
    d20 = dot_vectors(v2, v0)
    d21 = dot_vectors(v2, v1)
    D = d00 * d11 - d01 * d01
    v = (d11 * d20 - d01 * d21) / D
    w = (d00 * d21 - d01 * d20) / D
    u = 1.0 - v - w
    return u, v, w


# ==============================================================================
# Main
# ==============================================================================

if __name__ == '__main__':
    pass
mit
Python
ab946575b1050e67e2e6b4fdda237faa2dc342f5
add conversion script for BDDMPipeline
huggingface/diffusers
scripts/conversion_bddm.py
scripts/conversion_bddm.py
import argparse

import torch

from diffusers.pipelines.bddm import DiffWave, BDDMPipeline
from diffusers import DDPMScheduler


def convert_bddm_orginal(checkpoint_path, noise_scheduler_checkpoint_path, output_path):
    sd = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    noise_scheduler_sd = torch.load(noise_scheduler_checkpoint_path, map_location="cpu")

    model = DiffWave()
    model.load_state_dict(sd, strict=False)

    ts, _, betas, _ = noise_scheduler_sd
    ts, betas = list(ts.numpy().tolist()), list(betas.numpy().tolist())

    noise_scheduler = DDPMScheduler(
        timesteps=12,
        trained_betas=betas,
        timestep_values=ts,
        clip_sample=False,
        tensor_format="np",
    )

    pipeline = BDDMPipeline(model, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--noise_scheduler_checkpoint_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)

    args = parser.parse_args()

    convert_bddm_orginal(args.checkpoint_path, args.noise_scheduler_checkpoint_path, args.output_path)
apache-2.0
Python
922513b2e0e26432fd4e4addfe83e2b84d631d4f
Create change_function_signature.py
amorgun/python-trickery
change_function_signature.py
change_function_signature.py
def foo(a):
    print(a)

def bar(a, b):
    print(a, b)

func = foo
func(10)

func.__code__ = bar.__code__
func(10, 20)
mit
Python
a3b31137ac96bf3480aaecadd5faf3ca051fc4b0
Add bare ConnectionState
nullpixel/litecord,nullpixel/litecord
litecord/gateway/state.py
litecord/gateway/state.py
class ConnectionState:
    """State of a connection to the gateway over websockets

    Attributes
    ----------
    session_id: str
        Session ID this state refers to.
    events: `collections.deque`[dict]
        Deque of sent events to the connection. Used for resuming.
        This is filled up when the connection receives a dispatched event.
    """
    def __init__(self, session_id):
        self.session_id = session_id

    def clean(self):
        del self.session_id
mit
Python
83f2e11d63168e022d99075d2f35c6c813c4d37d
add a simple linear regression model
astromme/classify-handwritten-characters,astromme/classify-handwritten-characters
gnt_model.py
gnt_model.py
import tensorflow as tf

from utils.gnt_record import read_and_decode, BATCH_SIZE

with open('label_keys.list') as f:
    labels = f.readlines()

tfrecords_filename = "hwdb1.1.tfrecords"

filename_queue = tf.train.string_input_producer(
    [tfrecords_filename], num_epochs=10)

# Even when reading in multiple threads, share the filename
# queue.
images_batch, labels_batch = read_and_decode(filename_queue)

label_one_hot = tf.one_hot(labels_batch, len(labels))
print(label_one_hot)

# simple model
images_batch_normalized = images_batch / 128 - 0.5
print(images_batch)
print(images_batch_normalized)

images_batch_normalized = tf.reshape(images_batch_normalized, [BATCH_SIZE, 128*128])
print(images_batch_normalized)

w = tf.get_variable("w1", [128*128, len(labels)])
y_pred = tf.matmul(images_batch_normalized, w)

print("y pred & labels batch")
print(y_pred)
print(label_one_hot)

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_batch, logits=y_pred)

# for monitoring
loss_mean = tf.reduce_mean(loss)

train_op = tf.train.AdamOptimizer().minimize(loss)

# The op for initializing the variables.
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    while True:
        _, loss_val = sess.run([train_op, loss_mean])
        print(loss_val)

    coord.request_stop()
    coord.join(threads)
mit
Python
15970841d53e14d3739d8f512f815e8e3c19bf02
Create Opcao.py
AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb
backend/Database/Models/Opcao.py
backend/Database/Models/Opcao.py
from Database.Controllers.Curso import Curso

class Opcao(object):

    def __init__(self, dados=None):
        if dados is not None:
            self.id = dados['id']
            self.nome = dados['nome']
            self.id_curso = dados['id_curso']

    def getId(self):
        return self.id

    def setNome(self, nome):
        self.nome = nome

    def getNome(self):
        return self.nome

    def setId_curso(self, curso):
        self.id_curso = (Curso().pegarCurso('nome = %s', (curso))).getId()

    def getId_curso(self):
        return self.id_curso

    def getCurso(self):
        return (Curso().pegarCurso('id = %s', (self.id_curso,))).getNome()
mit
Python
3f50dcf6d91192253af320aaf72fcb13d307e137
add new package
iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack
var/spack/repos/builtin/packages/memsurfer/package.py
var/spack/repos/builtin/packages/memsurfer/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Memsurfer(PythonPackage):
    """MemSurfer is a tool to compute and analyze membrane surfaces found in a
       wide variety of large-scale molecular simulations."""

    homepage = "https://github.com/LLNL/MemSurfer"
    git      = "git@github.com:LLNL/MemSurfer.git"
    # url = "https://github.com/LLNL/MemSurfer/archive/1.0.tar.gz"
    # version('1.0', sha256='06e06eba88754b0c073f1c770981f7bdd501082986e4fbe28399be23b50138de')

    version('1.0', tag='v1.0', submodules=True)
    version('master', branch='master', submodules=True)
    # version('test', branch='ppoisson', submodules=True)

    variant('vtkmesa', default=False, description='Enable OSMesa support for VTK')

    extends('python@2.7.16')
    depends_on('cmake@3.14:')
    depends_on('swig@3.0.12')

    depends_on('py-cython')
    depends_on('py-numpy')
    depends_on('py-pip')

    depends_on('eigen@3.3.7')
    depends_on('cgal@4.13 +shared~core~demos~imageio')

    # vtk needs to know whether to build with mesa or opengl
    depends_on('vtk@8.1.2 +python+opengl2~mpi~haru', when='~vtkmesa')
    depends_on('vtk@8.1.2 +python+opengl2~mpi~haru +osmesa', when='+vtkmesa')

    # this is needed only to resolve the conflict between
    # the default and netcdf's spec
    depends_on('hdf5 +hl')

    # memsurfer's setup needs path to these deps to build extension modules
    def setup_environment(self, spack_env, run_env):
        spack_env.set('VTK_ROOT', self.spec['vtk'].prefix)
        spack_env.set('CGAL_ROOT', self.spec['cgal'].prefix)
        spack_env.set('BOOST_ROOT', self.spec['boost'].prefix)
        spack_env.set('EIGEN_ROOT', self.spec['eigen'].prefix)
lgpl-2.1
Python
76cfd2931f3aaacf37e39218833d2307233ddd04
Add tests for bson serialization functions
dwavesystems/dimod,dwavesystems/dimod
tests/test_serialization_bson.py
tests/test_serialization_bson.py
import unittest

import dimod
from dimod.serialization.bson import bqm_bson_decoder, bqm_bson_encoder

import numpy as np

try:
    import bson
    _bson_imported = True
except ImportError:
    _bson_imported = False


class TestBSONSerialization(unittest.TestCase):
    def test_empty_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_qubo({})
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': False,
            'linear': b'',
            'quadratic_vals': b'',
            'variable_type': 'BINARY',
            'offset': 0.0,
            'variable_order': [],
            'index_dtype': '<u2',
            'quadratic_head': b'',
            'quadratic_tail': b'',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)
        self.assertEqual(bqm, decoded)

    def test_single_variable_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_ising({"a": -1}, {})
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': False,
            'linear': b'\x00\x00\x80\xbf',
            'quadratic_vals': b'',
            'variable_type': 'SPIN',
            'offset': 0.0,
            'variable_order': ['a'],
            'index_dtype': '<u2',
            'quadratic_head': b'',
            'quadratic_tail': b'',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)
        self.assertEqual(bqm, decoded)

    def test_small_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_ising(
            {"a": 1, "b": 3, "c": 4.5, "d": 0},
            {"ab": -3, "cd": 3.5, "ad": 2}
        )
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': True,
            'linear': b'\x00\x00\x80?\x00\x00@@\x00\x00\x90@\x00\x00\x00\x00',
            'quadratic_vals': b'\x00\x00@\xc0\x00\x00\x00\x00\x00\x00\x00@'
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`@',
            'variable_type': 'SPIN',
            'offset': 0.0,
            'variable_order': ['a', 'b', 'c', 'd'],
            'index_dtype': '<u2',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)

        # no easy way to directly check if the bqm objects are equal (b/c float
        # precision, missing edges), so for now check if the qubo matrices are
        # the same
        var_order = sorted(bqm)
        np.testing.assert_almost_equal(bqm.to_numpy_matrix(var_order),
                                       decoded.to_numpy_matrix(var_order))

    @unittest.skipUnless(_bson_imported, "no pymongo bson installed")
    def test_bsonable(self):
        bqm = dimod.BinaryQuadraticModel.from_ising(
            {"a": 1, "b": 3, "c": 4.5, "d": 0},
            {"ab": -3, "cd": 3.5, "ad": 2}
        )
        encoded = bqm_bson_encoder(bqm)
        bson.BSON.encode(encoded)
apache-2.0
Python
b5db8d8b0620491169d54eaf05bb57e5a61903e1
add bash8 tool (like pep8, but way hackier)
openstack-dev/bashate,openstack-dev/bashate,locke105/bash8
bash8.py
bash8.py
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# bash8 - a pep8 equivalent for bash scripts
#
# this program attempts to be an automated style checker for bash scripts
# to fill the same part of code review that pep8 does in most OpenStack
# projects. It starts from humble beginnings, and will evolve over time.
#
# Currently Supported checks
#
# Errors
# - E001: check that lines do not end with trailing whitespace
# - E002: ensure that indents are only spaces, and not hard tabs
# - E003: ensure all indents are a multiple of 4 spaces

import argparse
import fileinput
import re
import sys

ERRORS = 0


def print_error(error, line):
    global ERRORS
    ERRORS = ERRORS + 1
    print("%s: '%s'" % (error, line.rstrip('\n')))
    print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno()))


def check_no_trailing_whitespace(line):
    if re.search('[ \t]+$', line):
        print_error('E001: Trailing Whitespace', line)


def check_indents(line):
    m = re.search('^(?P<indent>[ \t]+)', line)
    if m:
        if re.search('\t', m.group('indent')):
            print_error('E002: Tab indents', line)
        if (len(m.group('indent')) % 4) != 0:
            print_error('E003: Indent not multiple of 4', line)


def check_files(files):
    for line in fileinput.input(files):
        check_no_trailing_whitespace(line)
        check_indents(line)


def get_options():
    parser = argparse.ArgumentParser(
        description='A bash script style checker')
    parser.add_argument('files', metavar='file', nargs='+',
                        help='files to scan for errors')
    return parser.parse_args()


def main():
    opts = get_options()
    check_files(opts.files)

    if ERRORS > 0:
        print("%d bash8 error(s) found" % ERRORS)
        return 1
    else:
        return 0


if __name__ == "__main__":
    sys.exit(main())
apache-2.0
Python
db4ba2ca4e0ea96c9bc3f7e9d3eb61e7c7c3bc23
Create softmax
ueser/Toolbox
softmax.py
softmax.py
#! /usr/bin/env python
"""
Author: Umut Eser
Program: softmax.py
Date: Friday, September 30 2016
Description: Softmax applied over rows of a matrix
"""
import numpy as np


def softmax(X):
    """ Calculates softmax of the rows of a matrix X.

    Parameters
    ----------
    X : 2D numpy array

    Return
    ------
    2D numpy array of positive numbers between 0 and 1

    Examples
    --------
    >>> softmax([[0.1, 0.2],[0.9, -10]])
    array([[ 0.47502081, 0.52497919],[ 9.99981542e-01, 1.84578933e-05]])
    """
    # keepdims keeps the row maxima as a column vector so the subtraction
    # broadcasts per row (row-wise max shift) rather than per column
    e_X = np.exp(X - np.max(X, axis=1, keepdims=True))
    return np.divide(e_X.T, e_X.sum(axis=1)).T


if __name__ == "__main__":
    import doctest
    doctest.testmod()
mit
Python
28696b671a5f80f781c67f35ae5abb30efd6379c
Solve Time Conversion in python
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
solutions/uri/1019/1019.py
solutions/uri/1019/1019.py
import sys

h = 0
m = 0

for t in sys.stdin:
    t = int(t)

    if t >= 60 * 60:
        h = t // (60 * 60)
        t %= 60 * 60
    if t >= 60:
        m = t // 60
        t %= 60

    print(f"{h}:{m}:{t}")

    h = 0
    m = 0
mit
Python
9876f372100bbc4c272378fe9a06f7d7ddd90308
Add twitter daily backup script
dazzyd/bin,dazzyd/bin,yukixz/bin,dazzyd/bin,yukixz/bin
twitter/daily.py
twitter/daily.py
#!/usr/bin/env python3

import json
# sys and traceback are used for error reporting in Database.insert
import sys
import traceback
from datetime import datetime

import pymysql
from requests_oauthlib import OAuth1Session


class Twitter():
    def __init__(self):
        self.session = OAuth1Session(
            client_key="{consumer_key}",
            client_secret="{consumer_secret}",
            resource_owner_key="{access_token}",
            resource_owner_secret="{access_secret}",
        )

    def crawl(self, last_id):
        ''' Crawl new tweet from user timeline.
        :param last_id: last tweet's id in database
        :return list:
        '''
        if type(last_id) != int:
            raise TypeError("arg last_id expects int")

        url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
        params = {
            'count': 200,
            'since_id': last_id,
            'max_id': None,
            'trim_user': True,
            'contributor_details': False,
            'exclude_replies': False,
            'include_rts': True,
        }

        while True:
            response = self.session.get(url, params=params)
            tweets = json.loads(response.text)
            if len(tweets) > 0:
                yield from tweets
                params['max_id'] = tweets[-1]['id'] - 1
            else:
                break


class Database():
    def __init__(self):
        self.connection = pymysql.connect(
            host="localhost",
            user="{mysql_user}",
            password="{mysql_password}",
            database="twitter",
            charset="utf8mb4",
        )

    def insert(self, tweet):
        try:
            tweet_id = tweet['id']
            user_id = tweet['user']['id']
            text = tweet['text']
            time = str(datetime.strptime(
                tweet['created_at'].replace("+0000", ''), "%c"))
            cursor = self.connection.cursor()
            cursor.execute(
                '''INSERT INTO statuses VALUES (%s, %s, %s, %s)''',
                (tweet_id, user_id, text, time))
        except pymysql.err.ProgrammingError as err:
            print("Fail to insert tweet: %d" % tweet_id, file=sys.stderr)
            traceback.print_exc()

    def get_last_id(self):
        cursor = self.connection.cursor()
        cursor.execute("SELECT id FROM statuses ORDER BY id DESC LIMIT 1")
        tweet = cursor.fetchone()
        return tweet[0]


if __name__ == '__main__':
    twitter = Twitter()
    database = Database()

    last_id = database.get_last_id()
    timeline = twitter.crawl(last_id)
    for tweet in timeline:
        database.insert(tweet)
apache-2.0
Python
69005d995aa0e6d291216101253197c6b2d8260a
Add module for command-line interface
starcalibre/microscopium,jni/microscopium,microscopium/microscopium,microscopium/microscopium,Don86/microscopium,jni/microscopium,Don86/microscopium
husc/main.py
husc/main.py
import argparse


parser = argparse.ArgumentParser(description="Run the HUSC functions.")
subpar = parser.add_subparsers()

stitch = subpar.add_parser('stitch',
                           help="Stitch four quadrants into one image.")
stitch.add_argument('quadrant_image', nargs=4,
                    help="The images for each quadrant in order: NW, NE, " +
                         "SW, SE.")
stitch.add_argument('output_image',
                    help="The filename for the stitched image.")

illum = subpar.add_parser('illum',
                          help="Estimate and correct illumination.")
illum.add_argument('images', nargs='+', help="The input images.")
illum.add_argument('-o', '--output-suffix', default='.illum.tif',
                   metavar='SUFFIX',
                   help="What suffix to attach to the corrected images.")


def main():
    """Fetch commands from the command line."""
    args = parser.parse_args()
    print args


if __name__ == '__main__':
    main()
bsd-3-clause
Python
10440cbcde68ecf16c8b8b326ec96d1d7f8c6d6d
add basic PID for sail position
smaria/autonomous-sailing-robot,Southampton-Maritime-Robotics/autonomous-sailing-robot
src/boat_pid_control/src/boat_pid_control/sailPID.py
src/boat_pid_control/src/boat_pid_control/sailPID.py
""" PID control for the sailing robot controling sail position based on goal sail direction Inputs: - current heading - goal heading Output: - Change in motor position/motor position TODO: consider tack and jibe """ import rospy PROPORTIONAL_GAIN = 0.1 INTEGRAL_GAIN = 0 DERIVATIVE_GAIN = 0 currentHeading = 23 goalHeading = 35 # with new ROS input for goal or current heading # Error calculation for angular error! error = currentHeading - goalHeading p = error * PROPORTIONAL_GAIN i = 0 d = 0 correction = p + i + d #translate correction to servo change ...
bsd-2-clause
Python
ea0087970b0c0adfd8942123899ff0ec231afa03
Handle stealable element with utils
AleksNeStu/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core
test/selenium/src/lib/page/extended_info.py
test/selenium/src/lib/page/extended_info.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com

"""A module for extended info page models (visible in LHN on hover over
object members)"""

from selenium.common import exceptions

from lib import base
from lib.constants import locator
from lib.utils import selenium_utils


class ExtendedInfo(base.Component):
    """Model representing an extended info box that allows the object to be
    mapped"""
    locator_cls = locator.ExtendedInfo

    def __init__(self, driver):
        super(ExtendedInfo, self).__init__(driver)
        self.is_mapped = None
        self.button_map = None
        self.title = base.Label(driver, self.locator_cls.TITLE)

        self._set_is_mapped()

    def map_to_object(self):
        selenium_utils.click_on_staleable_element(
            self._driver,
            self.locator_cls.BUTTON_MAP_TO)
        self.is_mapped = True

    def _set_is_mapped(self):
        """Checks if the object is already mapped"""
        try:
            self._driver.find_element(*self.locator_cls.ALREADY_MAPPED)
            self.is_mapped = True
        except exceptions.NoSuchElementException:
            self.is_mapped = False
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com

"""A module for extended info page models (visible in LHN on hover over
object members)"""

from selenium.common import exceptions

from lib import base
from lib.constants import locator


class ExtendedInfo(base.Component):
    """Model representing an extended info box that allows the object to be
    mapped"""
    _locator = locator.ExtendedInfo

    def __init__(self, driver):
        super(ExtendedInfo, self).__init__(driver)
        self.button_map = None

    def _reload_contents(self):
        self.button_map = base.Button(
            self._driver, self._locator.BUTTON_MAP_TO)

    def map_to_object(self):
        try:
            self.button_map = base.Button(
                self._driver, self._locator.BUTTON_MAP_TO)
            self.button_map.click()
        except exceptions.StaleElementReferenceException:
            self._reload_contents()
            return self.map_to_object()

    def is_already_mapped(self):
        """Checks if the object is already mapped"""
        try:
            self._driver.find_element(*self._locator.ALREADY_MAPPED)
            return True
        except exceptions.NoSuchElementException:
            return False
apache-2.0
Python
1f47c575cfd310fd4bee18673f7cbb69eb622959
Create block_params.py
bluewitch/Code-Blue-Python
block_params.py
block_params.py
# block_params.py
# Part 2 of 3 of a blockchain demonstration

GENESIS_INDEX = 0
GENESIS_PREVIOUS_HASH = '0'
GENESIS_TIMESTAMP = 1495851743
GENESIS_DATA = 'first block'


class BlockParams():
    def __init__(self, index, previous_hash, timestamp, data):
        self.index = index
        self.previous_hash = previous_hash
        self.timestamp = timestamp
        self.data = data

    def __str__(self):
        return str(self.index) + self.previous_hash + str(self.timestamp) + self.data

    @classmethod
    def genesis_params(cls):
        return cls(GENESIS_INDEX,
                   GENESIS_PREVIOUS_HASH,
                   GENESIS_TIMESTAMP,
                   GENESIS_DATA)
mit
Python
aa4f01690c4db950144e520cf11466d7d92de291
Fix for output ports
eubr-bigsea/tahiti,eubr-bigsea/tahiti,eubr-bigsea/tahiti,eubr-bigsea/tahiti
migrations/versions/910243d8f820_fix_output_ports_with_models.py
migrations/versions/910243d8f820_fix_output_ports_with_models.py
"""fix output ports with models Revision ID: 910243d8f820 Revises: 500f09c2325d Create Date: 2017-12-19 16:19:33.927563 """ import json from alembic import context from alembic import op from sqlalchemy import String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.sql import table, column # revision identifiers, used by Alembic. revision = '910243d8f820' down_revision = '500f09c2325d' branch_labels = None depends_on = None def _insert_operation_port(): tb = table( 'operation_port', column('id', Integer), column('type', String), column('tags', String), column('operation_id', Integer), column('order', Integer), column('multiplicity', String), column('slug', String)) all_ops = [ (15, 'OUTPUT', None, 52, 3, 'MANY', 'vector-model'), ] rows = [dict(zip([c.name for c in tb.columns], operation)) for operation in all_ops] op.bulk_insert(tb, rows) def _insert_operation_port_translation(): tb = table( 'operation_port_translation', column('id', Integer), column('locale', String), column('name', String), column('description', String), ) all_ops = [ (15, 'en', 'vector model', 'Vector model'), (15, 'pt', 'modelo de vetores', 'Modelo de vetores'), ] rows = [dict(zip([c.name for c in tb.columns], operation)) for operation in all_ops] op.bulk_insert(tb, rows) def _insert_operation_port_interface_operation_port(): tb = table( 'operation_port_interface_operation_port', column('operation_port_id', Integer), column('operation_port_interface_id', Integer)) columns = [c.name for c in tb.columns] data = [ (15, 20), (83, 20), ] rows = [dict(zip(columns, cat)) for cat in data] op.bulk_insert(tb, rows) new_values = [ {"key": "count", "value": "Count term frequency"}, {"key": "word2vec", "value": "Use word2vec algorithm"}, {"key": "hashing_tf", "value": "Map the sequence of terms to their TF using hashing trick"}, ] old_values = [ {"key": "count", "value": "Count term frequency"}, {"key": "word2vec", "value": "Use word2vec algorithm"}, ] all_commands = [ [_insert_operation_port, 'DELETE from operation_port WHERE id IN (15)'], [_insert_operation_port_translation, 'DELETE from operation_port_translation WHERE id IN (15)'], [ _insert_operation_port_interface_operation_port, 'DELETE FROM operation_port_interface_operation_port ' 'WHERE operation_port_id IN (15, 83) ' ' AND operation_port_interface_id = 20', ], [ "UPDATE operation_form_field SET `values` = '{}' WHERE id = 123".format( json.dumps(new_values)), "UPDATE operation_form_field SET `values` = '{}' WHERE id = 123".format( json.dumps(old_values)) ] ] def upgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() try: for cmd in all_commands: if isinstance(cmd[0], (unicode, str)): connection.execute(cmd[0]) elif isinstance(cmd[0], list): for row in cmd[0]: connection.execute(row) else: cmd[0]() except: session.rollback() raise session.commit() def downgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() try: for cmd in reversed(all_commands): if isinstance(cmd[1], (unicode, str)): connection.execute(cmd[1]) elif isinstance(cmd[1], list): for row in cmd[1]: connection.execute(row) else: cmd[1]() except: session.rollback() raise session.commit()
apache-2.0
Python
65dc2f12d8540d3aa494447033e022fe3995701b
correct language mistake
kissiel/btc,bittorrent/btc
btc_download.py
btc_download.py
#! /usr/bin/env python

import argparse
import sys
import os
from btc import encoder, decoder, error, warning, list_to_dict, dict_to_list, client

_description = 'download torrent file locally'

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', default='.')
    parser.add_argument('-o', '--output', default=None)
    args = parser.parse_args()

    if sys.stdin.isatty():
        error('no input')
    files = sys.stdin.read()

    try:
        files = decoder.decode(files)
    except ValueError:
        error('unexpected input: %s' % files)

    if not os.path.exists(args.directory):
        error('no such directory: %s' % args.directory)

    if args.output and len(files) > 1:
        if sys.stdout.isatty():
            warning('multiple files: --output is ignored')

    for f in files:
        # FIXME: problems with \\ and /
        filename = args.output or f['name']

        complete = float(f['downloaded']) / float(f['size']) * 100
        if sys.stdout.isatty() and complete < 100.0:
            print 'skipping incomplete file: %s' % f['name']
            continue

        if args.output and len(files) > 1:
            filename = f['name']

        if args.output and len(files) == 1:
            directory = os.path.dirname(os.path.join(args.directory, args.output))
            if not os.path.exists(directory):
                error('no such directory: %s' % directory)
        else:
            directory = os.path.dirname(os.path.join(args.directory, f['name']))
            if not os.path.exists(directory):
                os.makedirs(directory)

        if sys.stdout.isatty():
            print 'downloading: %s' % os.path.join(args.directory, filename)
        client.torrent_download_file(f['sid'], f['fileid'], filename, args.directory)

    if not sys.stdout.isatty():
        l = client.list_torrents()
        print encoder.encode(l)

if __name__ == '__main__':
    main()
#! /usr/bin/env python

import argparse
import sys
import os
from btc import encoder, decoder, error, warning, list_to_dict, dict_to_list, client

_description = 'download torrent file locally'

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', default='.')
    parser.add_argument('-o', '--output', default=None)
    args = parser.parse_args()

    if sys.stdin.isatty():
        error('no input')
    files = sys.stdin.read()

    try:
        files = decoder.decode(files)
    except ValueError:
        error('unexpected input: %s' % files)

    if not os.path.exists(args.directory):
        error('no such directory: %s' % args.directory)

    if args.output and len(files) > 1:
        if sys.stdout.isatty():
            warning('multiple files: --output is ignored')

    for f in files:
        # FIXME: problems with \\ and /
        filename = args.output or f['name']

        complete = float(f['downloaded']) / float(f['size']) * 100
        if sys.stdout.isatty() and complete < 100.0:
            print 'skipping uncomplete file: %s' % f['name']
            continue

        if args.output and len(files) > 1:
            filename = f['name']

        if args.output and len(files) == 1:
            directory = os.path.dirname(os.path.join(args.directory, args.output))
            if not os.path.exists(directory):
                error('no such directory: %s' % directory)
        else:
            directory = os.path.dirname(os.path.join(args.directory, f['name']))
            if not os.path.exists(directory):
                os.makedirs(directory)

        if sys.stdout.isatty():
            print 'downloading: %s' % os.path.join(args.directory, filename)
        client.torrent_download_file(f['sid'], f['fileid'], filename, args.directory)

    if not sys.stdout.isatty():
        l = client.list_torrents()
        print encoder.encode(l)

if __name__ == '__main__':
    main()
mit
Python
7922b24882894cbc83bd4247c11d8c4a66b4b218
Add utility script for database setup
leaffan/pynhldb
_setup_database.py
_setup_database.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setup.create_teams import migrate_teams
from setup.create_divisions import create_divisions

if __name__ == '__main__':

    # migrating teams from json file to database
    migrate_teams(simulation=True)
    # creating divisions from division configuration file
    create_divisions(simulation=True)
mit
Python
af436dd269a959324b495885b9406610f3737a7a
Create addtoindex.py
wdyer0726/CS101
udacity/webcrawler/addtoindex.py
udacity/webcrawler/addtoindex.py
# Define a procedure, add_to_index,
# that takes 3 inputs:
# - an index: [[<keyword>,[<url>,...]],...]
# - a keyword: String
# - a url: String
# If the keyword is already
# in the index, add the url
# to the list of urls associated
# with that keyword.
# If the keyword is not in the index,
# add an entry to the index: [keyword,[url]]

index = []

def add_to_index(index,keyword,url):
    for entry in index:
        if entry[0] == keyword:
            entry[1].append(url)
            return
    index.append([keyword, [url]])

add_to_index(index,'udacity','http://udacity.com')
add_to_index(index,'computing','http://acm.org')
add_to_index(index,'udacity','http://npr.org')
print index
#>>> [['udacity', ['http://udacity.com', 'http://npr.org']],
#>>> ['computing', ['http://acm.org']]]
apache-2.0
Python
93621f9441af4df77c8364050d7cc3dc2b1b43b2
Add tests for `check` command
igordejanovic/textX,igordejanovic/textX,igordejanovic/textX
tests/functional/registration/test_check.py
tests/functional/registration/test_check.py
""" Test check/validation command. """ import os import subprocess this_folder = os.path.abspath(os.path.dirname(__file__)) def test_check_metamodel(): """ Meta-model is also a model """ metamodel_file = os.path.join(this_folder, 'projects', 'flow_dsl', 'flow_dsl', 'Flow.tx') output = subprocess.check_output(['textx', 'check', metamodel_file], stderr=subprocess.STDOUT) assert b'Flow.tx: OK.' in output def test_check_valid_model(): metamodel_file = os.path.join(this_folder, 'projects', 'flow_dsl', 'tests', 'models', 'data_flow.eflow') output = subprocess.check_output(['textx', 'check', metamodel_file], stderr=subprocess.STDOUT) assert b'data_flow.eflow: OK.' in output def test_check_invalid_model(): metamodel_file = os.path.join(this_folder, 'projects', 'flow_dsl', 'tests', 'models', 'data_flow_including_error.eflow') output = subprocess.check_output(['textx', 'check', metamodel_file], stderr=subprocess.STDOUT) assert b'error: types must be lowercase' in output
mit
Python
720e288aba61ecc2214c8074e33d181c0d4584f5
Add do_datasets module
CartoDB/cartodb-python,CartoDB/carto-python
carto/do_datasets.py
carto/do_datasets.py
""" Module for working with Data Observatory Datasets .. module:: carto.do_datasets :platform: Unix, Windows :synopsis: Module for working with Data Observatory Datasets .. moduleauthor:: Jesús Arroyo <jarroyo@carto.com> """ from pyrestcli.fields import CharField from .resources import Resource, Manager from .exceptions import CartoException from .paginators import CartoPaginator API_VERSION = "v4" API_ENDPOINT = "api/{api_version}/do/datasets/" class DODatasets(Resource): """ Represents a Data Observatory Datasets object in CARTO. """ datasets = CharField(many=True) class Meta: collection_endpoint = API_ENDPOINT.format(api_version=API_VERSION) name_field = "datasets" class DODatasetsManager(Manager): """ Manager for the DODatasets class. """ resource_class = DODatasets json_collection_attribute = "result" paginator_class = CartoPaginator def get(self): return super(DODatasetsManager, self).get("datasets")
bsd-3-clause
Python
69939f351cd9c9d555fa1cd091b67314558e862b
Add __future__ import
lukw00/powerline,lukw00/powerline,magus424/powerline,Liangjianghao/powerline,wfscheper/powerline,darac/powerline,firebitsbr/powerline,magus424/powerline,Liangjianghao/powerline,Liangjianghao/powerline,seanfisk/powerline,xfumihiro/powerline,kenrachynski/powerline,junix/powerline,EricSB/powerline,s0undt3ch/powerline,DoctorJellyface/powerline,prvnkumar/powerline,lukw00/powerline,darac/powerline,dragon788/powerline,wfscheper/powerline,s0undt3ch/powerline,QuLogic/powerline,cyrixhero/powerline,prvnkumar/powerline,Luffin/powerline,S0lll0s/powerline,prvnkumar/powerline,firebitsbr/powerline,xxxhycl2010/powerline,QuLogic/powerline,IvanAli/powerline,firebitsbr/powerline,bartvm/powerline,areteix/powerline,russellb/powerline,QuLogic/powerline,kenrachynski/powerline,IvanAli/powerline,blindFS/powerline,S0lll0s/powerline,dragon788/powerline,magus424/powerline,EricSB/powerline,xxxhycl2010/powerline,bartvm/powerline,Luffin/powerline,S0lll0s/powerline,areteix/powerline,Luffin/powerline,russellb/powerline,EricSB/powerline,areteix/powerline,bartvm/powerline,bezhermoso/powerline,bezhermoso/powerline,cyrixhero/powerline,s0undt3ch/powerline,seanfisk/powerline,seanfisk/powerline,xfumihiro/powerline,junix/powerline,dragon788/powerline,junix/powerline,wfscheper/powerline,blindFS/powerline,russellb/powerline,DoctorJellyface/powerline,kenrachynski/powerline,DoctorJellyface/powerline,xfumihiro/powerline,IvanAli/powerline,darac/powerline,cyrixhero/powerline,xxxhycl2010/powerline,bezhermoso/powerline,blindFS/powerline
powerline/segments/tmux.py
powerline/segments/tmux.py
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, division, print_function

from powerline.bindings.tmux import get_tmux_output


def attached_clients(pl, minimum=1):
	'''Return the number of tmux clients attached to the currently active session

	:param int minimum:
		The minimum number of attached clients that must be present for this
		segment to be visible.
	'''
	session_output = get_tmux_output('list-panes', '-F', '#{session_name}')
	if not session_output:
		return None
	session_name = session_output.rstrip().split('\n')[0]

	attached_clients_output = get_tmux_output('list-clients', '-t', session_name)
	attached_count = len(attached_clients_output.rstrip().split('\n'))

	return None if attached_count < minimum else str(attached_count)
# vim:fileencoding=utf-8:noet
from powerline.bindings.tmux import get_tmux_output


def attached_clients(pl, minimum=1):
	'''Return the number of tmux clients attached to the currently active session

	:param int minimum:
		The minimum number of attached clients that must be present for this
		segment to be visible.
	'''
	session_output = get_tmux_output('list-panes', '-F', '#{session_name}')
	if not session_output:
		return None
	session_name = session_output.rstrip().split('\n')[0]

	attached_clients_output = get_tmux_output('list-clients', '-t', session_name)
	attached_count = len(attached_clients_output.rstrip().split('\n'))

	return None if attached_count < minimum else str(attached_count)
mit
Python
6e8c3147938c72114b2fd18db47cca8a23fd147e
fix tests
SpringerPE/cf-configuration-exporter
test/mutations_test.py
test/mutations_test.py
import unittest

from exporter.mutations import TerraformMutation, ManifestMutation
from exporter.exceptions import FieldNotOptionalException

cf_dict = {
    "cf_orgs": [{
        "name": "first_org",
        "spaces": [
            {
                "name": "first_space",
                "org": "first_org",
                "quota": "m",
                "allow_ssh": True,
                "asgs": [],
                "managers": [{"name": "manager"}],
                "developers": [{"name": "developer"}],
                "auditors": [{"name": "auditor"}]
            },
            {
                "name": "second_space",
                "org": "second_org",
                "quota": "m",
                "allow_ssh": False,
                "asgs": [],
                "managers": [{"name": "manager"}],
                "developers": [{"name": "developer"}],
                "auditors": [{"name": "auditor"}]
            }]
    }]
}


class TestMutations(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        pass

    def test_map_field(self):
        source_dict = {"name": "resource_name", "property": "value", "none_property": None}
        dest_dict = {}
        tm = ManifestMutation(source_dict)
        tm.map_field("name", dest_dict, source_dict)
        self.assertIn("name", dest_dict)
        self.assertEqual(dest_dict["name"], "resource_name")

        # Test renaming cf field
        tm.map_field("mutated_name", dest_dict, source_dict, cf_field="name")
        self.assertIn("mutated_name", dest_dict)
        self.assertEqual(dest_dict["mutated_name"], "resource_name")

        # Test optionality
        with self.assertRaises(FieldNotOptionalException):
            tm.map_field("not_existent", dest_dict, source_dict, optional=False)
        tm.map_field("not_existent", dest_dict, source_dict, optional=True)
        self.assertNotIn("not_existent", dest_dict)

        # Test default
        tm.map_field("not_existent_w_default", dest_dict, source_dict,
                     cf_field="not_existent", optional=True, default="default")
        self.assertIn("not_existent_w_default", dest_dict)
        self.assertEqual(dest_dict["not_existent_w_default"], "default")

        # Test mapping
        tm.map_field("mapped_property", dest_dict, source_dict,
                     cf_field="property", mapping={"value": "mapped_value"})
        self.assertIn("mapped_property", dest_dict)
        self.assertEqual(dest_dict["mapped_property"], "mapped_value")

        # Test mapping when source value is None
        tm.map_field("none_property", dest_dict, source_dict)
        self.assertNotIn("none_property", dest_dict)

    def test_map_list_field(self):
        source_dict = {"name": "resource_name", "list_item": ["value1", "value2"]}
        dest_dict = {}
        tm = ManifestMutation(source_dict)
        tm.map_list_field("list_item", dest_dict, source_dict)
        self.assertIn("list_item", dest_dict)
        self.assertEqual(["value1", "value2"], dest_dict["list_item"])

        # Test that key formatting works
        tm.map_list_field("list_item", dest_dict, source_dict, fmt='fmt_{}')
        self.assertIn("list_item", dest_dict)
        self.assertEqual(["fmt_value1", "fmt_value2"], dest_dict["list_item"])

        # Test that key mapping works
        tm.map_list_field("list_item", dest_dict, source_dict, key_fn=lambda x: "mapped")
        self.assertIn("list_item", dest_dict)
        self.assertEqual(["mapped", "mapped"], dest_dict["list_item"])
mit
Python
ba76ae145c570fce671f0ab115d4a0740a29cde4
add hadoop
mihaisoloi/conpaas,mihaisoloi/conpaas,mihaisoloi/conpaas,mihaisoloi/conpaas,mihaisoloi/conpaas
conpaas-client/cps/hadoop.py
conpaas-client/cps/hadoop.py
import sys

from cps.base import BaseClient


class Client(BaseClient):

    def info(self, service_id):
        service = BaseClient.info(self, service_id)

    def usage(self, cmdname):
        BaseClient.usage(self, cmdname)
bsd-3-clause
Python
2476e7202933c197004688d32994d3b24a7ce74f
Add missing fulltoc for Sphinx documentation.
heuermh/adam,bigdatagenomics/adam,heuermh/adam,bigdatagenomics/adam,bigdatagenomics/adam,heuermh/adam,bigdatagenomics/adam,heuermh/adam
docs/vendor/sphinxcontrib/fulltoc.py
docs/vendor/sphinxcontrib/fulltoc.py
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sphinx import addnodes


def html_page_context(app, pagename, templatename, context, doctree):
    """Event handler for the html-page-context signal.

    Modifies the context directly.

    - Replaces the 'toc' value created by the HTML builder with one
      that shows all document titles and the local table of contents.
    - Sets display_toc to True so the table of contents is always
      displayed, even on empty pages.
    - Replaces the 'toctree' function with one that uses the entire
      document structure, ignores the maxdepth argument, and uses
      only prune and collapse.
    """
    rendered_toc = get_rendered_toctree(app.builder, pagename)
    context['toc'] = rendered_toc
    context['display_toc'] = True  # force toctree to display

    if "toctree" not in context:
        # json builder doesn't use toctree func, so nothing to replace
        return

    def make_toctree(collapse=True):
        return get_rendered_toctree(app.builder,
                                    pagename,
                                    prune=False,
                                    collapse=collapse,
                                    )
    context['toctree'] = make_toctree


def get_rendered_toctree(builder, docname, prune=False, collapse=True):
    """Build the toctree relative to the named document,
    with the given parameters, and then return the rendered
    HTML fragment.
    """
    fulltoc = build_full_toctree(builder,
                                 docname,
                                 prune=prune,
                                 collapse=collapse,
                                 )
    rendered_toc = builder.render_partial(fulltoc)['fragment']
    return rendered_toc


def build_full_toctree(builder, docname, prune, collapse):
    """Return a single toctree starting from docname containing all
    sub-document doctrees.
    """
    env = builder.env
    doctree = env.get_doctree(env.config.master_doc)
    toctrees = []
    for toctreenode in doctree.traverse(addnodes.toctree):
        toctree = env.resolve_toctree(docname, builder, toctreenode,
                                      collapse=collapse,
                                      prune=prune,
                                      )
        toctrees.append(toctree)
    if not toctrees:
        return None
    result = toctrees[0]
    for toctree in toctrees[1:]:
        if toctree:
            result.extend(toctree.children)
    env.resolve_references(result, docname, builder)
    return result


def setup(app):
    app.connect('html-page-context', html_page_context)
apache-2.0
Python
0822547f3fcd79a5332c450d78cd24999e5e81d0
Migrate huts
c2corg/v6_api,c2corg/v6_api,c2corg/v6_api
c2corg_api/scripts/migration/documents/waypoints/huts.py
c2corg_api/scripts/migration/documents/waypoints/huts.py
from c2corg_api.scripts.migration.documents.waypoints.waypoint import \
    MigrateWaypoints


class MigrateHuts(MigrateWaypoints):

    def get_name(self):
        return 'huts'

    def get_count_query(self):
        return (
            'select count(*) from app_huts_archives;'
        )

    def get_query(self):
        return (
            'select '
            '   id, document_archive_id, is_latest_version, elevation, '
            '   is_protected, redirects_to, '
            '   ST_Force2D(ST_SetSRID(geom, 3857)) geom, '
            '   shelter_type, is_staffed, phone, url, staffed_capacity, '
            '   unstaffed_capacity, has_unstaffed_matress, '
            '   has_unstaffed_blanket, has_unstaffed_gas, has_unstaffed_wood '
            'from app_huts_archives '
            'order by id, document_archive_id;'
        )

    def get_count_query_locales(self):
        return (
            'select count(*) from app_huts_i18n_archives;'
        )

    def get_query_locales(self):
        return (
            'select '
            '   id, document_i18n_archive_id, is_latest_version, culture, '
            '   name, description, pedestrian_access, staffed_period '
            'from app_huts_i18n_archives '
            'order by id, document_i18n_archive_id;'
        )

    def get_document(self, document_in, version):
        waypoint_type = self.convert_type(
            document_in.shelter_type, MigrateHuts.shelter_types)
        if waypoint_type is None:
            waypoint_type = 'hut'

        return dict(
            document_id=document_in.id,
            version=version,
            waypoint_type=waypoint_type,
            elevation=document_in.elevation,
            is_staffed=document_in.is_staffed,
            phone=document_in.phone,
            url=document_in.url,
            capacity_staffed=document_in.staffed_capacity,
            capacity=document_in.unstaffed_capacity,
            matress_unstaffed=self.convert_type(
                document_in.has_unstaffed_matress,
                MigrateHuts.boolean_types),
            blanket_unstaffed=self.convert_type(
                document_in.has_unstaffed_blanket,
                MigrateHuts.boolean_types),
            gas_unstaffed=self.convert_type(
                document_in.has_unstaffed_gas,
                MigrateHuts.boolean_types),
            heating_unstaffed=self.convert_type(
                document_in.has_unstaffed_wood,
                MigrateHuts.boolean_types)
        )

    def get_document_locale(self, document_in, version):
        # TODO extract summary
        return dict(
            document_id=document_in.id,
            id=document_in.document_i18n_archive_id,
            version=version,
            culture=document_in.culture,
            title=document_in.name,
            description=document_in.description,
            access=document_in.pedestrian_access,
            access_period=document_in.staffed_period
        )

    shelter_types = {
        '1': 'hut',
        '5': 'gite',
        '2': 'shelter',
        '3': 'bivouac',
        '4': 'base_camp',
        '6': 'camp_site'
    }

    boolean_types = {
        '1': False,
        '8': True,
        '0': None,
        '10': None  # non applicable
    }
agpl-3.0
Python
c994c1c86df7e6698ccef342b1b2101f03c01587
Add daily stats
hansjorg/rust-ci,hansjorg/rust-ci,youprofit/rust-ci-1,youprofit/rust-ci-1,youprofit/rust-ci-1,youprofit/rust-ci-1
tpt/ppatrigger/management/commands/dailystats.py
tpt/ppatrigger/management/commands/dailystats.py
import traceback
import pytz
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from ppatrigger.models import Package
from ppatrigger.models import DailyStats
from ppatrigger.models import Build


class Command(BaseCommand):
    args = ''
    help = 'Compile daily stats for all projects'

    def handle(self, *args, **options):

        packages = Package.objects.all()

        for package in packages:
            try:
                latest_daily = DailyStats.objects.filter(
                    package__exact = package).latest('created_at')
                latest = latest_daily.created_at
            except DailyStats.DoesNotExist:
                # First time running, use package creation
                latest = package.created_at

            now = datetime.utcnow().replace(tzinfo = pytz.utc)
            day = latest

            while day <= now:
                self.stdout.write(str(day))

                next_day = day + timedelta(days=1)

                builds = Build.objects.filter(
                    project__package__exact = package,
                    fetched_at__gte = day,
                    fetched_at__lt = next_day)

                if len(builds):
                    successful = 0
                    failed = 0
                    errors = 0

                    for build in builds:
                        self.stdout.write(str(build))

                        if build.is_success():
                            successful += 1
                        elif build.is_failure():
                            failed += 1
                        else:
                            errors += 1

                    stats = DailyStats(package = package,
                                       created_at = now,
                                       successful = successful,
                                       failed = failed,
                                       errors = errors)
                    stats.save()

                day = next_day
apache-2.0
Python
7c2095c0330d14382db76bef944efae5f8d76faf
Add file with tests from rainbow categorical type example
cpcloud/dynd-python,insertinterestingnamehere/dynd-python,michaelpacer/dynd-python,izaid/dynd-python,insertinterestingnamehere/dynd-python,pombredanne/dynd-python,michaelpacer/dynd-python,pombredanne/dynd-python,michaelpacer/dynd-python,michaelpacer/dynd-python,izaid/dynd-python,insertinterestingnamehere/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,izaid/dynd-python,mwiebe/dynd-python,ContinuumIO/dynd-python,ContinuumIO/dynd-python,pombredanne/dynd-python,pombredanne/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,insertinterestingnamehere/dynd-python,izaid/dynd-python,mwiebe/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,mwiebe/dynd-python
dynd/tests/test_types_categorical.py
dynd/tests/test_types_categorical.py
import sys
import unittest
from dynd import nd, ndt


class TestDType(unittest.TestCase):
    def test_make_categorical(self):
        # Create categorical type with 256 values
        tp = ndt.make_categorical(nd.range(0, 512, 2))
        self.assertEqual(tp.type_id, 'categorical')
        self.assertEqual(tp.storage_type, ndt.uint8)
        self.assertEqual(tp.category_type, ndt.int32)
        # Create categorical type with 256 < x < 65536 values
        tp = ndt.make_categorical(nd.range(40000, dtype=ndt.float32))
        self.assertEqual(tp.type_id, 'categorical')
        self.assertEqual(tp.storage_type, ndt.uint16)
        self.assertEqual(tp.category_type, ndt.float32)
        # Create categorical type with > 65536 values
        tp = ndt.make_categorical(nd.range(70000, dtype=ndt.int128))
        self.assertEqual(tp.type_id, 'categorical')
        self.assertEqual(tp.storage_type, ndt.uint32)
        self.assertEqual(tp.category_type, ndt.int128)

    def test_factor_categorical(self):
        a = nd.array(["2012-05-10T02:29:42"] * 100, "datetime")
        dt1 = ndt.factor_categorical(a.date)
        #print (dt1)
        self.assertEqual(nd.as_py(dt1.categories.ucast(ndt.string)),
                         ['2012-05-10'])

    def test_factor_fixedstring(self):
        adata = [('M', 13), ('F', 17), ('F', 34), ('M', 19),
                 ('M', 13), ('F', 34), ('F', 22)]
        a = nd.array(adata, dtype='{gender: string[1], age: int32}')
        catdt = ndt.factor_categorical(a)
        b = a.ucast(catdt)
        x = repr(b)
        self.assertTrue('["M", 13]' in x)

    def test_rainbow_example(self):
        rainbow_vals = ['red', 'orange', 'yellow',
                        'green', 'blue', 'indigo', 'violet']
        color_vals = ['red', 'red', 'violet', 'blue',
                      'yellow', 'yellow', 'red', 'indigo']
        color_vals_int = [rainbow_vals.index(x) for x in color_vals]
        # Create the type
        rainbow = ndt.make_categorical(rainbow_vals)
        # Make sure it looks the way we expect
        self.assertEqual(rainbow.type_id, 'categorical')
        self.assertEqual(rainbow.data_size, 1)
        self.assertEqual(rainbow.data_alignment, 1)
        self.assertEqual(rainbow.storage_type, ndt.uint8)
        self.assertEqual(rainbow.category_type, ndt.string)
        self.assertEqual(nd.as_py(rainbow.categories), rainbow_vals)
        # Create an array of the type
        colors = nd.array(color_vals, dtype=rainbow)
        # Make sure it is convertible back to strings/pyobject/int
        self.assertEqual(nd.as_py(colors), color_vals)
        self.assertEqual(nd.as_py(colors.ints), color_vals_int)


if __name__ == '__main__':
    unittest.main()
bsd-2-clause
Python
9f73e60ba9d3775ef4dda9c815412f28ed80b518
Add new package: lzop (#17098)
LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack
var/spack/repos/builtin/packages/lzop/package.py
var/spack/repos/builtin/packages/lzop/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Lzop(CMakePackage):
    """lzop is a file compressor which is very similar to gzip. lzop uses
    the LZO data compression library for compression services, and its main
    advantages over gzip are much higher compression and decompression speed
    (at the cost of some compression ratio)."""

    homepage = "https://www.lzop.org"
    url      = "https://www.lzop.org/download/lzop-1.03.tar.gz"

    version('1.04', sha256='7e72b62a8a60aff5200a047eea0773a8fb205caf7acbe1774d95147f305a2f41')
    version('1.03', sha256='c1425b8c77d49f5a679d5a126c90ea6ad99585a55e335a613cae59e909dbb2c9')
    version('1.01', sha256='28acd94d933befbc3af986abcfe833173fb7563b66533fdb4ac592f38bb944c7')

    depends_on('pkgconfig', type='build')
    depends_on('lzo')
lgpl-2.1
Python
1f5158d7c24e304b1b2ed2c374cd05aa662aa333
Add LruCache object.
strinking/statbot,strinking/statbot
statbot/cache.py
statbot/cache.py
#
# cache.py
#
# statbot - Store Discord records for later analysis
# Copyright (c) 2017 Ammon Smith
#
# statbot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#

from collections import MutableMapping, OrderedDict

__all__ = [
    'LruCache',
]

class LruCache(MutableMapping):
    __slots__ = (
        'store',
        'max_size',
    )

    def __init__(self, max_size=None):
        self.store = OrderedDict()
        self.max_size = max_size

    def __getitem__(self, key):
        obj = self.store.pop(key)
        self.store[key] = obj
        return obj

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __setitem__(self, key, value):
        self.store.pop(key, None)
        self.store[key] = value

        # guard against the default max_size=None, which would
        # otherwise fail the integer comparison on first insert
        while self.max_size is not None and len(self) > self.max_size:
            self.store.popitem(last=False)

    def __delitem__(self, key):
        del self.store[key]

    def __contains__(self, key):
        return key in self.store

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
mit
Python
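A minimal usage sketch for the LruCache record above (illustrative only, not part of the commit; keys and sizes are made up). Lookups re-insert the key at the back of the OrderedDict, so the front entry is always the least recently used:

cache = LruCache(max_size=2)
cache['a'] = 1
cache['b'] = 2
cache['a']              # touching 'a' marks it most-recently-used
cache['c'] = 3          # exceeds max_size, so 'b' (the oldest entry) is evicted
assert 'b' not in cache and cache.get('a') == 1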
0eb573afa067e23422c5a5495563a6f4d87a549d
Create soundcloud.py
nortxort/tinybot
apis/soundcloud.py
apis/soundcloud.py
""" Contains functions to fetch info from api.soundcloud.com """ from utilities import web # Soundcloud API key. SOUNDCLOUD_API_KEY = '4ce43a6430270a1eea977ff8357a25a3' def soundcloud_search(search): """ Searches soundcloud's API for a given search term. :param search: str the search term to search for. :return: dict{'type=soundcloud', 'video_id', 'video_time', 'video_title'} or None on no match or error. """ if search: search_url = 'http://api.soundcloud.com/tracks/?' \ 'filter=streamable&q=%s&limit=25&client_id=%s' % (search, SOUNDCLOUD_API_KEY) response = web.http_get(search_url, json=True) if response['json'] is not None: try: track_id = response['json'][0]['id'] track_time = response['json'][0]['duration'] track_title = response['json'][0]['title'].encode('ascii', 'ignore') return { 'type': 'soundCloud', 'video_id': track_id, 'video_time': track_time, 'video_title': track_title } except (IndexError, KeyError): return None return None def soundcloud_track_info(track_id): if track_id: info_url = 'http://api.soundcloud.com/tracks/%s?client_id=%s' % (track_id, SOUNDCLOUD_API_KEY) response = web.http_get(info_url, json=True) if response['json'] is not None: try: user_id = response['json'][0]['user_id'] track_time = response['json'][0]['duration'] track_title = response['json'][0]['title'].encode('ascii', 'ignore') return { 'type': 'soundCloud', 'video_id': track_id, 'video_time': track_time, 'video_title': track_title, 'user_id': user_id } except (IndexError, KeyError): return None return None
mit
Python
cc55fc564e84718710e1c29ca7e29be522aa70c5
Create OutputNeuronGroup_Liquid.py
ricardodeazambuja/BrianConnectUDP
examples/OutputNeuronGroup_Liquid.py
examples/OutputNeuronGroup_Liquid.py
'''
Example of a spike receptor (only receives spikes)

In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy

from brian_multiprocess_udp import BrianConnectUDP

# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.

my_neuron_input_number = 135

def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    print "main_NeuronGroup!" #DEBUG!

    simclock = simulation_clock

    Nr = NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
    Nr.v = 0

    # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
    Syn_iNG_Nr = Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)

    Syn_iNG_Nr[:, :] = 'i==j'

    print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!

    Syn_iNG_Nr.w = 10

    MExt = SpikeMonitor(Nr)                    # Spikes sent by UDP
    Mdummy = SpikeMonitor(input_Neuron_Group)  # Spikes received by UDP

    return ([Nr], [Syn_iNG_Nr], [MExt, Mdummy])

def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)

    This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
    """
    # pass
    figure()
    raster_plot(simulation_MN[0])
    title("Spikes Sent by UDP - Output")
    show(block=False)

    figure()
    raster_plot(simulation_MN[1])
    title("Spikes Received by UDP - Output")
    show(block=True)
    # savefig('output.pdf')

if __name__=="__main__":
    my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number,
                                    post_simulation_function=post_simulation_function,
                                    input_addresses=[("127.0.0.1", 22222, my_neuron_input_number)],
                                    simclock_dt=1, inputclock_dt=1, TotalSimulationTime=10000,
                                    sim_repetitions=0, brian_address=2)
cc0-1.0
Python
5feef18ca3dda099f33568bf0f2b189fe297a3e0
add test function rosenbrock
albahnsen/pyea
pyea/functions/__init__.py
pyea/functions/__init__.py
#TODO: Add documentation

import numpy as np


def func_rosenbrock(pop, a=1, b=100):
    # http://en.wikipedia.org/wiki/Rosenbrock_function
    x = pop[:, 0]
    y = pop[:, 1]
    return (a - x)**2 + b * (y - x**2)**2


def print_func(func, **kwargs):
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    from matplotlib import cm

    if func == 'rosenbrock':
        # Initial parameters
        params = dict(range_=[-1.5, 1.5], step_=0.05)
        # Params in kwargs
        for param in params.keys():
            if param in kwargs:
                params[param] = kwargs[param]
        # Fill grid
        x = np.arange(params['range_'][0], params['range_'][1], params['step_'])
        y = np.arange(params['range_'][0], params['range_'][1], params['step_'])
        x, y = np.meshgrid(x, y)
        pop = np.vstack((x.flatten(), y.flatten())).transpose()
        z = func_rosenbrock(pop)
        z = z.reshape(x.shape)
        # Plot
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.jet,
                               linewidth=0, antialiased=False)
        plt.show()
bsd-3-clause
Python
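As a quick sanity check (not from the commit), the Rosenbrock function has its global minimum at (a, a**2), where it evaluates to zero:

import numpy as np
pop = np.array([[1.0, 1.0], [0.0, 0.0]])
print(func_rosenbrock(pop))
# [0. 1.]: (1-1)^2 + 100*(1-1^2)^2 = 0 at the minimum, (1-0)^2 + 100*0^2 = 1 at the origin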
378bd7f4d647243a1e736f4dc0bfd0742d5f3d0b
Create Combinations.py
UmassJin/Leetcode
Array/Combinations.py
Array/Combinations.py
"""
Given two integers n and k, return all possible combinations of k numbers out of 1 ... n.

For example, If n = 4 and k = 2, a solution is:

[
  [2,4],
  [3,4],
  [2,3],
  [1,2],
  [1,3],
  [1,4],
]
"""

class Solution:
    # @return a list of lists of integers
    def combine(self, n, k):
        def combine_helper(n, k, start, depth, subres):
            if depth == k:
                result.append(subres)
                return
            for i in xrange(start, n+1):
                combine_helper(n, k, i+1, depth+1, subres+[i])

        if n == 0 or k == 0:
            return [[]]
        result = []
        combine_helper(n, k, 1, 0, [])
        return result
mit
Python
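An illustrative call (Python 2, matching the solution's use of xrange; not part of the commit). The helper fixes each start value before recursing, so results come out in lexicographic order:

sol = Solution()
print sol.combine(4, 2)
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]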
c042f640b1d841b6779cd69393b47ef65cfedfea
add problem 11
kulapard/projecteuler.net
python/problem_11.py
python/problem_11.py
# -*- coding: utf-8 -*-
"""
Largest product in a grid
https://projecteuler.net/problem=11
"""

GRID = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""


def adjacent_numbers_gen(grid):
    # right
    for i, row in enumerate(grid):
        for j, a in enumerate(row):
            if j + 3 == len(row):
                break
            b, c, d = row[j + 1], row[j + 2], row[j + 3]
            yield a, b, c, d

    # down
    for i, row in enumerate(grid):
        if i + 3 == len(grid):
            break
        for j, a in enumerate(row):
            b, c, d = grid[i + 1][j], grid[i + 2][j], grid[i + 3][j]
            yield a, b, c, d

    # diagonally right + down
    for i, row in enumerate(grid):
        if i + 3 == len(grid):
            break
        for j, a in enumerate(row):
            if j + 3 == len(row):
                break
            b, c, d = grid[i + 1][j + 1], grid[i + 2][j + 2], grid[i + 3][j + 3]
            yield a, b, c, d

    # diagonally left + down
    for i, row in enumerate(grid):
        if i + 3 == len(grid):
            break
        for j, a in enumerate(row):
            if j - 3 < 0:
                continue
            b, c, d = grid[i + 1][j - 1], grid[i + 2][j - 2], grid[i + 3][j - 3]
            yield a, b, c, d


grid = []
for line in GRID.strip().split('\n'):
    grid.append([int(x.strip()) for x in line.split()])

max_product = 0
for a, b, c, d in adjacent_numbers_gen(grid):
    max_product = max(max_product, a * b * c * d)

print max_product
mit
Python
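For reference (not part of the commit), the script uses the Python 2 print statement; run under Python 2 it prints the grid's largest four-in-a-line product, the well-known answer to Project Euler problem 11:

# $ python problem_11.py
# 70600674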
474c5f977ab5b035567f0107c457622c51189ac6
Add new topics migration file
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
csunplugged/topics/migrations/0086_auto_20171108_0840.py
csunplugged/topics/migrations/0086_auto_20171108_0840.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-08 08:40
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('topics', '0085_auto_20171030_0035'),
    ]

    operations = [
        migrations.AddField(
            model_name='programmingchallengelanguage',
            name='name_de',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengelanguage',
            name='name_en',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='programmingchallengelanguage',
            name='name_fr',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='classroomresource',
            name='description',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='classroomresource',
            name='description_de',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='classroomresource',
            name='description_en',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='classroomresource',
            name='description_fr',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='curriculumarea',
            name='name',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='curriculumarea',
            name='name_de',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='curriculumarea',
            name='name_en',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='curriculumarea',
            name='name_fr',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='programmingchallengelanguage',
            name='name',
            field=models.CharField(max_length=200),
        ),
    ]
mit
Python
afe0f8fc731639cbe28798bb2a554c84ccbd8b2a
Create test_script.py
botlabio/autonomio,botlabio/autonomio
test_script.py
test_script.py
print "hello world"
mit
Python
a2bc05454ba166e3931fba130e44f49f66a79080
Add virtualshackles crawler
jodal/comics,jodal/comics,jodal/comics,datagutten/comics,klette/comics,datagutten/comics,datagutten/comics,datagutten/comics,klette/comics,jodal/comics,klette/comics
comics/comics/virtualshackles.py
comics/comics/virtualshackles.py
import re

from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'Virtual Shackles'
    language = 'en'
    url = 'http://www.virtualshackles.com/'
    start_date = '2009-03-27'
    rights = 'Jeremy Vinar and Mike Fahmie'


class Crawler(CrawlerBase):
    history_capable_days = 32
    schedule = 'We,Fr'

    def crawl(self, pub_date):
        feed = self.parse_feed('http://feeds.feedburner.com/VirtualShackles?format=atom')
        for entry in feed.for_date(pub_date):
            url = entry.summary.src('img[src*="virtualshackles.com/img/"]')
            title = entry.title
            page_url = entry.raw_entry.feedburner_origlink
            page_url = re.sub(r'/(\d+/?)', '/-\g<1>', page_url)
            page = self.parse_page(page_url)
            orion = page.text('#orionComments')
            jack = page.text('#jackComments')

            if orion and jack:
                comments = u'orion: %s\n jack: %s' % (orion, jack)
            elif orion:
                comments = u'orion: %s' % (orion)
            elif jack:
                comments = u'jack: %s' % (jack)
            else:
                comments = None

            return CrawlerResult(url, title, comments)
agpl-3.0
Python
c13014c18496f35c4c94f156a18d442d3859f73b
Add assembly testing module
thomasgibson/firedrake-hybridization
testing_ops.py
testing_ops.py
from __future__ import absolute_import, print_function, division

from firedrake import *

mesh = UnitSquareMesh(2, 2, quadrilateral=False)
n = FacetNormal(mesh)

degree = 1
V = FunctionSpace(mesh, "RT", degree)
U = FunctionSpace(mesh, "DG", degree - 1)
W = V * U

u, p = TrialFunctions(W)
v, q = TestFunctions(W)

a = (dot(u, v) + div(v)*p + q*div(u))*dx

x = SpatialCoordinate(mesh)
f = Function(U).assign(0)

L = f*q*dx + 42*dot(v, n)*ds(4)

bcs = [DirichletBC(W.sub(0), Expression(("0", "0")), (1, 2))]

L = assemble(L)

y = Function(W)
for bc in bcs:
    bc.apply(y)

rhs = assemble(L - assemble(action(a, y)))

for bc in bcs:
    bc.apply(rhs)
mit
Python
71afe426a84789b65953ccd057014d17a11de859
Add a command to extend the generation_high from generation 1 to 2
mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,geoffkilpin/pombola,hzj123/56th,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola
mzalendo/core/management/commands/core_extend_areas_to_generation_2.py
mzalendo/core/management/commands/core_extend_areas_to_generation_2.py
# The import of data into Kenyan MapIt had the constituencies in
# generation 2, while all the other area types were in generation 1.
# This is unfortunate since it makes it appear to later import scripts
# that the district type disappeared between generation 1 and 3.
#
# This script just extends the generation_high to generation 2 for
# every area where it was set to generation 1.

from django.core.management.base import NoArgsCommand

from mapit.models import Area, Generation, Type, NameType, Country, CodeType


class Command(NoArgsCommand):
    help = 'Change all generation_high=1 to generation_high=2'

    def handle_noargs(self, **options):
        g1 = Generation.objects.get(id=1)
        g2 = Generation.objects.get(id=2)
        for area in Area.objects.filter(generation_high=g1):
            area.generation_high = g2
            area.save()
agpl-3.0
Python
17fddbb1df78420aaebb811785b8e99769b45fa9
Create keyradius.py
meaton00/class_project
bin/keyradius.py
bin/keyradius.py
import csv
from numpy import sqrt

# Midpoint for Key Bridge
x1 = 38.902543
y1 = -77.069830

# Threshold marker
x2 = 38.900122
y2 = -77.071176

radius_squared = (x2 - x1)**2 + (y2 - y1)**2
radius = sqrt(radius_squared)

data_file = open("IncidentData_24OCT14.csv", "rU")
data = csv.DictReader(data_file)

results = []

for row in data:
    lat = float(row["Latitude"])
    lon = float(row["Longitude"])
    distance_squared = ((lat) - x1)**2 + ((lon) - y1)**2
    distance = sqrt(distance_squared)
    if distance <= radius:
        results.append({"Location": row["Location"],
                        "Type": row["Standardized Type"],
                        "Start Time": row["Time Opened"],
                        "End Time": row["Time Closed"],
                        "Latitude": row["Latitude"],
                        "Longitude": row["Longitude"]})

f = open('radiuskeybridge.csv', 'wb')
w = csv.DictWriter(f, fieldnames=["Location", "Type", "Start Time",
                                  "End Time", "Latitude", "Longitude"])
w.writeheader()
w.writerows(results)
mit
Python
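The script above compares raw degree differences against a threshold derived the same way, which is a workable approximation over a few hundred metres. If a distance in metres were wanted instead, a haversine helper could replace both computations (a sketch assuming a spherical Earth; not part of the commit):

from math import radians, sin, cos, asin, sqrt

def haversine_m(lat1, lon1, lat2, lon2):
    # great-circle distance in metres, spherical-Earth approximation (R = 6371 km)
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    h = sin((lat2 - lat1) / 2)**2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2)**2
    return 2 * 6371000 * asin(sqrt(h))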
fbc92f8400d4565a86b81329053a1302ce21c2f8
Add English Unique Pupil Number (UPN)
arthurdejong/python-stdnum,arthurdejong/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,holvi/python-stdnum,holvi/python-stdnum
stdnum/gb/upn.py
stdnum/gb/upn.py
# upn.py - functions for handling English UPNs
#
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA

"""UPN (English Unique Pupil Number).

The Unique Pupil Number (UPN) is a 13-character code that identifies pupils
in English state schools and is designed to aid tracking pupil progress
through the school system.

The number consists of a check letter, a 3-digit LA (Local Authority)
number for the issuing school, a 4-digit DfE number (School Establishment
Number), 2 digits for the issue year and 3 digits for a serial number.
Temporary numbers have a 2-digit serial and a letter.

More information:

* https://www.gov.uk/government/publications/unique-pupil-numbers

>>> validate('B801200005001')
'B801200005001'
>>> validate('A801200005001')
Traceback (most recent call last):
    ...
InvalidChecksum: ...
>>> validate('X80120000A001')  # middle part must be numeric
Traceback (most recent call last):
    ...
InvalidFormat: ...
>>> validate('E000200005001')  # LA number must be known
Traceback (most recent call last):
    ...
InvalidComponent: ...
"""

from stdnum.util import clean
from stdnum.exceptions import *


# The allowed characters in an UPN.
_alphabet = 'ABCDEFGHJKLMNPQRTUVWXYZ0123456789'

# The known values for the LA (Local Authority) number.
# https://www.gov.uk/government/statistics/new-local-authority-codes-january-2011
_la_numbers = set((
    201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 301,
    302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
    316, 317, 318, 319, 320, 330, 331, 332, 333, 334, 335, 336, 340, 341,
    342, 343, 344, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 370,
    371, 372, 373, 380, 381, 382, 383, 384, 390, 391, 392, 393, 394, 420,
    800, 801, 802, 803, 805, 806, 807, 808, 810, 811, 812, 813, 815, 816,
    821, 822, 823, 825, 826, 830, 831, 835, 836, 837, 840, 841, 845, 846,
    850, 851, 852, 855, 856, 857, 860, 861, 865, 866, 867, 868, 869, 870,
    871, 872, 873, 874, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885,
    886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 908, 909, 916,
    919, 921, 925, 926, 928, 929, 931, 933, 935, 936, 937, 938))


def compact(number):
    """Convert the number to the minimal representation. This strips the
    number of any valid separators and removes surrounding whitespace."""
    return clean(number, ' ').upper().strip()


def calc_check_digit(number):
    """Calculate the check digit for the number."""
    check = sum(i * _alphabet.index(n)
                for i, n in enumerate(number[-12:], 2)) % 23
    return _alphabet[check]


def validate(number):
    """Check to see if the number provided is a valid UPN. This checks
    length, formatting and check digits."""
    number = compact(number)
    if len(number) != 13:
        raise InvalidLength()
    if not number[1:-1].isdigit() or number[-1] not in _alphabet:
        raise InvalidFormat()
    if int(number[1:4]) not in _la_numbers:
        raise InvalidComponent()
    if calc_check_digit(number[1:]) != number[0]:
        raise InvalidChecksum()
    return number


def is_valid(number):
    """Check to see if the number provided is a valid UPN. This checks
    length, formatting and check digits."""
    try:
        return bool(validate(number))
    except ValidationError:
        return False
lgpl-2.1
Python
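The doctests above double as usage; imported as in the rest of python-stdnum (the file lives at stdnum/gb/upn.py), the checker behaves like this (illustrative):

from stdnum.gb import upn
upn.is_valid('B801200005001')      # True, matching the first doctest
upn.compact(' b 801 2000 05001 ')  # 'B801200005001'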
c27e97ea959a9863e57ce12b0afc5fa092562548
Create Character.py
jtrip/slackrun
Character.py
Character.py
import untangle


class Character(object):

    def __init__(self, data):
        assert isinstance(data, dict)
        self.__dict__ = data


def get_data(character):
    '''
    :param character: String, character name
    :return: Dictionary, character data
    '''
    path = 'tests\\'
    filepath = path + character + '.chum5'
    try:
        c = untangle.parse(filepath)
    except IOError:
        print("Error: can't find file or read data")

    data = {
        'Name': c.character.name.cdata,
        'imageURL': str(c.character.notes.cdata),
        'Charisma': int(c.character.attributes.attribute[4].value.cdata),
        'Intelligence': int(c.character.attributes.attribute[5].value.cdata),
        'Hacking': int(c.character.skills.skill[37].rating.cdata),
        'Seduction': int(c.character.skills.skill[37].rating.cdata)
    }
    return data

## LIMITS WILL NEED TO BE CALCULATED
# Inherent Limits Add appropriate attribute(s); calculate as listed below —
# Mental [(Logic x 2) + Intuition + Willpower] / 3 (round up) —
# Physical [(Strength x 2) + Body + Reaction] / 3 (round up) —
# Social [(Charisma x 2) + Willpower + Essence] / 3 (round up)
##
bsd-2-clause
Python
983d6b12db4050ff7d252e1717adbfe39add2f49
Add missing file
apache/incubator-allura,lym/allura-git,leotrubach/sourceforge-allura,apache/incubator-allura,leotrubach/sourceforge-allura,apache/allura,apache/allura,heiths/allura,heiths/allura,heiths/allura,lym/allura-git,apache/allura,heiths/allura,lym/allura-git,Bitergia/allura,leotrubach/sourceforge-allura,Bitergia/allura,apache/allura,apache/incubator-allura,lym/allura-git,apache/allura,lym/allura-git,leotrubach/sourceforge-allura,Bitergia/allura,heiths/allura,Bitergia/allura,Bitergia/allura,apache/incubator-allura
Allura/allura/lib/widgets/auth_widgets.py
Allura/allura/lib/widgets/auth_widgets.py
import ew as ew_core
import ew.jinja2_ew as ew
from ew.core import validator

from pylons import request
from formencode import Invalid
from webob import exc

from .forms import ForgeForm
from allura.lib import plugin


class LoginForm(ForgeForm):
    submit_text='Login'
    style='wide'

    class fields(ew_core.NameList):
        username = ew.TextField(label='Username')
        password = ew.PasswordField(label='Password')

    class hidden_fields(ew_core.NameList):
        return_to = ew.HiddenField()

    @validator
    def validate(self, value, state=None):
        try:
            value['username'] = plugin.AuthenticationProvider.get(request).login()
        except exc.HTTPUnauthorized:
            msg = 'Invalid login'
            raise Invalid(
                msg,
                dict(username=value['username']),
                None)
        return value
apache-2.0
Python
c349e8df72c09a98fe6b038c763c41008bef70a2
add migration for reimporting universities
hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare
hs_dictionary/migrations/0005_reimport_universities.py
hs_dictionary/migrations/0005_reimport_universities.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import csv
import os

from django.db import migrations, models

from hs_dictionary.models import University


def forwards(apps, schema_editor):
    University.objects.all().delete()
    with open(os.path.dirname(__file__) + "/world-universities.csv") as f:
        reader = csv.reader(f)
        for i, line in enumerate(reader):
            University.objects.create(
                name=line[1],
                country_code=line[0],
                url=line[2]
            )


class Migration(migrations.Migration):

    dependencies = [
        ('hs_dictionary', '0004_merge'),
    ]

    operations = [
        migrations.RunPython(forwards)
    ]
bsd-3-clause
Python
d77e8df3fa913e7a60c1870e49a6b6197d7a9125
Add tests for zerver/views/realm_emoji.py.
zulip/zulip,dhcrzf/zulip,souravbadami/zulip,showell/zulip,brockwhittaker/zulip,rishig/zulip,blaze225/zulip,kou/zulip,Juanvulcano/zulip,dattatreya303/zulip,Jianchun1/zulip,krtkmj/zulip,arpith/zulip,jphilipsen05/zulip,synicalsyntax/zulip,hackerkid/zulip,dhcrzf/zulip,souravbadami/zulip,vikas-parashar/zulip,eeshangarg/zulip,showell/zulip,brockwhittaker/zulip,rishig/zulip,Vallher/zulip,timabbott/zulip,susansls/zulip,blaze225/zulip,paxapy/zulip,rht/zulip,kou/zulip,andersk/zulip,Jianchun1/zulip,krtkmj/zulip,jphilipsen05/zulip,synicalsyntax/zulip,hackerkid/zulip,dhcrzf/zulip,sonali0901/zulip,vikas-parashar/zulip,eeshangarg/zulip,jackrzhang/zulip,kou/zulip,Vallher/zulip,reyha/zulip,jrowan/zulip,shubhamdhama/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,Vallher/zulip,hackerkid/zulip,Galexrt/zulip,timabbott/zulip,susansls/zulip,dattatreya303/zulip,dattatreya303/zulip,Jianchun1/zulip,sup95/zulip,sup95/zulip,tommyip/zulip,blaze225/zulip,paxapy/zulip,sonali0901/zulip,brockwhittaker/zulip,Vallher/zulip,reyha/zulip,jrowan/zulip,shubhamdhama/zulip,vabs22/zulip,hackerkid/zulip,rht/zulip,samatdav/zulip,jackrzhang/zulip,joyhchen/zulip,cosmicAsymmetry/zulip,vikas-parashar/zulip,AZtheAsian/zulip,samatdav/zulip,punchagan/zulip,jness/zulip,vaidap/zulip,dawran6/zulip,mahim97/zulip,eeshangarg/zulip,kou/zulip,Vallher/zulip,peguin40/zulip,joyhchen/zulip,grave-w-grave/zulip,ryanbackman/zulip,niftynei/zulip,sharmaeklavya2/zulip,KingxBanana/zulip,jackrzhang/zulip,ahmadassaf/zulip,synicalsyntax/zulip,umkay/zulip,dawran6/zulip,mahim97/zulip,joyhchen/zulip,grave-w-grave/zulip,blaze225/zulip,arpith/zulip,Galexrt/zulip,vaidap/zulip,showell/zulip,niftynei/zulip,grave-w-grave/zulip,mohsenSy/zulip,kou/zulip,j831/zulip,isht3/zulip,sup95/zulip,blaze225/zulip,arpith/zulip,Jianchun1/zulip,Galexrt/zulip,jphilipsen05/zulip
zerver/tests/test_realm_emoji.py
zerver/tests/test_realm_emoji.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from zerver.lib.actions import get_realm, check_add_realm_emoji
from zerver.lib.test_helpers import AuthedTestCase

import ujson


class RealmEmojiTest(AuthedTestCase):

    def test_list(self):
        self.login("iago@zulip.com")
        realm = get_realm('zulip.com')
        check_add_realm_emoji(realm, "my_emoji", "https://example.com/my_emoji")
        result = self.client.get("/json/realm/emoji")
        self.assert_json_success(result)
        self.assertEqual(200, result.status_code)
        content = ujson.loads(result.content)
        self.assertEqual(len(content["emoji"]), 1)

    def test_upload(self):
        self.login("iago@zulip.com")
        data = {"name": "my_emoji", "url": "https://example.com/my_emoji"}
        result = self.client_put("/json/realm/emoji", info=data)
        self.assert_json_success(result)
        self.assertEqual(200, result.status_code)

        result = self.client.get("/json/realm/emoji")
        content = ujson.loads(result.content)
        self.assert_json_success(result)
        self.assertEqual(len(content["emoji"]), 1)

    def test_upload_exception(self):
        self.login("iago@zulip.com")
        data = {"name": "my_em*/oji", "url": "https://example.com/my_emoji"}
        result = self.client_put("/json/realm/emoji", info=data)
        self.assert_json_error(result, u'Invalid characters in Emoji name')

    def test_delete(self):
        self.login("iago@zulip.com")
        realm = get_realm('zulip.com')
        check_add_realm_emoji(realm, "my_emoji", "https://example.com/my_emoji")
        result = self.client_delete("/json/realm/emoji/my_emoji")
        self.assert_json_success(result)

        result = self.client.get("/json/realm/emoji")
        content = ujson.loads(result.content)
        self.assert_json_success(result)
        self.assertEqual(len(content["emoji"]), 0)
apache-2.0
Python
6cb66978e44d447fd210dd92de194659b5f33fb3
Add debug util for WORKSPACE.
googlecloudrobotics/core,googlecloudrobotics/core,googlecloudrobotics/core
bazel/debug_repository.bzl
bazel/debug_repository.bzl
"""Debug util for repository definitions.""" def debug_repository(repo, *fields): """debug_repository(repo) identifies which version of a repository has been defined in the WORKSPACE by printing some of its fields. Example: # at the bottom of the WORKSPACE file load("//bazel:debug_repository.bzl", "debug_repository") debug_repository("org_golang_x_net") If needed, you can override the printed fields by passing additional parameters: debug_repository("io_grpc_grpc_java", "patches", "urls") """ if len(fields) == 0: fields = ["branch", "commit", "tag", "url", "urls"] rule = native.existing_rule(repo) if rule == None: print(repo, "not found") return for f in fields: if f in rule and len(rule[f]) > 0: print(repo, f, rule[f])
apache-2.0
Python
316bef330c0770739e95f9c1108e07697655d27e
fix when multi python version bug
wufeifei/cobra,wufeifei/cobra,braveghz/cobra,wufeifei/cobra,braveghz/cobra,40huo/cobra,braveghz/cobra,wufeifei/cobra,braveghz/cobra,40huo/cobra,40huo/cobra,40huo/cobra,wufeifei/cobra,40huo/cobra,braveghz/cobra,wufeifei/cobra,braveghz/cobra,40huo/cobra
cobra/__version__.py
cobra/__version__.py
import sys
import platform

__title__ = 'cobra'
__description__ = 'Code Security Audit'
__url__ = 'https://github.com/wufeifei/cobra'
__issue_page__ = 'https://github.com/wufeifei/cobra/issues/new'
__python_version__ = sys.version.split()[0]
__platform__ = platform.platform()
__version__ = '2.0.0-alpha'
__author__ = 'Feei'
__author_email__ = 'feei@feei.cn'
__license__ = 'MIT License'
__copyright__ = 'Copyright (C) 2017 Feei. All Rights Reserved'
__introduction__ = """
    ,---.     |
    |    ,---.|---.,---.,---.
    |    |   ||   ||    ,---|
    `---``---``---``    `---^ v{version}

GitHub: https://github.com/wufeifei/cobra

Cobra is a static code analysis system that automates the detecting vulnerabilities and security issue.""".format(version=__version__)
__epilog__ = """Usage:
  python {m} -t {td}
  python {m} -t {td} -r cvi-190001,cvi-190002
  python {m} -t {td} -f json -o /tmp/report.json
  python {m} -t {tg} -f json -o feei@feei.cn
  python {m} -t {tg} -f json -o http://push.to.com/api
  sudo python {m} -H 127.0.0.1 -P 80
""".format(m='cobra.py', td='tests/vulnerabilities', tg='https://github.com/ethicalhack3r/DVWA')
import sys
import platform

__title__ = 'cobra'
__description__ = 'Code Security Audit'
__url__ = 'https://github.com/wufeifei/cobra'
__issue_page__ = 'https://github.com/wufeifei/cobra/issues/new'
__python_version__ = sys.version.split()[0]
__platform__ = platform.platform()
__version__ = '2.0.0-alpha'
__author__ = 'Feei'
__author_email__ = 'feei@feei.cn'
__license__ = 'MIT License'
__copyright__ = 'Copyright (C) 2017 Feei. All Rights Reserved'
__introduction__ = """
    ,---.     |
    |    ,---.|---.,---.,---.
    |    |   ||   ||    ,---|
    `---``---``---``    `---^ v{version}

GitHub: https://github.com/wufeifei/cobra

Cobra is a static code analysis system that automates the detecting vulnerabilities and security issue.""".format(version=__version__)
__epilog__ = """Usage:
  {m} -t {td}
  {m} -t {td} -r cvi-190001,cvi-190002
  {m} -t {td} -f json -o /tmp/report.json
  {m} -t {tg} -f json -o feei@feei.cn
  {m} -t {tg} -f json -o http://push.to.com/api
  sudo {m} -H 127.0.0.1 -P 80
""".format(m='./cobra.py', td='tests/vulnerabilities', tg='https://github.com/ethicalhack3r/DVWA')
mit
Python
b67041367fcc10da7879c123ab44671f258ef649
support script in python for bootstrapping erlang on a new erts
ericbmerritt/sinan,erlware-deprecated/sinan,ericbmerritt/sinan,ericbmerritt/sinan,erlware-deprecated/sinan,erlware-deprecated/sinan
support/build.py
support/build.py
#! /bin/python
"""Support for building sinan, bootstrapping it on a new version of erlang"""

import sys
import os
import commands
from optparse import OptionParser


class BuildError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

ERTS_VERSION = "5.6.3"

BUILD_PATH = "_build/development/apps/%s/ebin"

ERLWARE_PATH = "/usr/local/erlware"

ERLC = "erlc +debug_info "

LOCAL_APPS = [("etask", "0.5.0"),
              ("sinan", "0.10.0.14"),
              ("sinan_web_api", "0.1.0.4")]

ERLWARE_APPS = ["fconf-0.3.0.0",
                "ktuo-0.4.0.1",
                "crary-0.2.3",
                "eunit-2.0",
                "cryptographic-0.2.1",
                "ewlib-0.8.2.0",
                "ewrepo-0.18.6.0",
                "gas-6.1.1",
                "kernel-2.12.3",
                "ibrowse-1.4",
                "uri-0.2.0"]


def generate_local_path(app):
    ebin = "_build/development/apps/%s-%s/ebin" % (app[0], app[1])
    include = "_build/development/apps/%s-%s/include" % (app[0], app[1])
    if not os.path.isdir(ebin):
        raise BuildError(ebin + " is not a directory")
    return " -pa %s -I %s " % (ebin, include)


def generate_erlware_path(path):
    ebin = "%s/packages/%s/lib/%s/ebin" % (ERLWARE_PATH, ERTS_VERSION, path)
    include = "%s/packages/%s/lib/%s/include" % (ERLWARE_PATH, ERTS_VERSION, path)
    if not os.path.isdir(ebin):
        raise BuildError(ebin + " is not a directory")
    return " -pa %s -I %s " % (ebin, include)


def compile_app(app):
    ebin = "_build/development/apps/%s-%s/ebin" % (app[0], app[1])
    compile_command = ("erlc +debug_info %s %s -o %s/ ./server/%s/src/*.erl" %
                       (' '.join(map(generate_local_path, LOCAL_APPS)),
                        ' '.join(map(generate_erlware_path, ERLWARE_APPS)),
                        ebin,
                        app[0]))
    (status, out) = commands.getstatusoutput(compile_command)
    if 0 != status:
        raise BuildError(out)


def compile_apps():
    for app in LOCAL_APPS:
        compile_app(app)


def main():
    parser = OptionParser()
    parser.add_option("-e", "--erlware", dest="erlware", type="string",
                      default="/usr/local/erlware",
                      help="The location of Erlware")
    (options, args) = parser.parse_args()
    ERLWARE_PATH = options.erlware
    compile_apps()

if __name__ == "__main__":
    main()
mit
Python
1483b352683ecf126e1063c3a6fa2f07dcdb7720
add new module.wq
cellnopt/cellnopt,cellnopt/cellnopt
cno/core/gtt.py
cno/core/gtt.py
import numpy as np
import pylab
import pandas as pd

from biokit.rtools import RSession
from cno.core import CNORBase
from easydev import TempFile

__all__ = ['GTTBool']


class GTTBool(CNORBase):
    """

    ::

        from cno import *
        c = cnorbool.CNORbool(cnodata("PKN-ToyMMB.sif"),
                              cnodata("MD-ToyMMB.csv"), verboseR=False)
        c.optimise(reltol=0.5)
        c.optimise(reltol=0.5)
        g = gtt.GTTBool(c._model, c.data, c.models, c.results.scores)
        d = g.get_gtt()

    """
    def __init__(self, model, data, models, scores, verboseR=False):
        """
        Note that once, grouped, the scores should be identical albeit the
        model size

        [scores[i] for i in grouped.groups.values()[10]]

        :param model: a instance of :class:`CNOGraph`
        :param data: an instance of :class:`XMIDAS`
        :param models: an instance of compatible :class:`Models`
        :param scores: the scores of each model.
        """
        CNORBase.__init__(self, verboseR)
        self.models = models
        self.scores = scores
        self.model = model
        self.data = data  # a MIDAS file

    def _init(self):
        fhmodel = TempFile()
        fhdata = TempFile()
        self.model.to_sif(fhmodel.name)
        self.data.to_midas(fhdata.name)
        self.session.run("library(CellNOptR)")
        self.session.run('model=readSIF("%s")' % fhmodel.name)
        self.session.run('cnolist=CNOlist("%s")' % fhdata.name)

    def _get_sim(self, bs):
        self.session.bs1 = bs
        script = """
        png()
        output = cutAndPlot(cnolist, model, list(bs1), plotPDF=F)
        dev.off()
        """
        self.session.run(script)
        res = self.session.output['simResults'][0]
        res = list(res['t0'].flatten()) + list(res['t1'].flatten())
        return res

    def get_gtt(self):
        print("init R library")
        self._init()
        N = len(self.models)
        from easydev import progress_bar
        b = progress_bar(N)
        d = {}
        for i in range(0, N):
            res = np.array(self._get_sim(self.models.df.ix[i].values))
            b.animate(i, N)
            d[i] = res
        df = pd.DataFrame(d).transpose()
        grouped = df.groupby(list(df.columns))
        pylab.hist([len(this) for this in grouped.groups.values()], 100)
        return {'simulation': d, 'grouped': grouped}
bsd-2-clause
Python
e307bf72a8aa21088d491c90efd9a731014e63f1
move states into separate file
State/stacks,cfstacks/stacks
stacks/states.py
stacks/states.py
FAILED_STACK_STATES = [
    'CREATE_FAILED',
    'ROLLBACK_FAILED',
    'DELETE_FAILED',
    'UPDATE_ROLLBACK_FAILED'
]

COMPLETE_STACK_STATES = [
    'CREATE_COMPLETE',
    'UPDATE_COMPLETE',
]

ROLLBACK_STACK_STATES = [
    'ROLLBACK_COMPLETE',
    'UPDATE_ROLLBACK_COMPLETE',
]

IN_PROGRESS_STACK_STATES = [
    'CREATE_IN_PROGRESS',
    'ROLLBACK_IN_PROGRESS',
    'DELETE_IN_PROGRESS',
    'UPDATE_IN_PROGRESS',
    'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
    'UPDATE_ROLLBACK_IN_PROGRESS',
    'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
]
mit
Python
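A small illustration (not in the commit) of how such state lists are typically consumed, mapping a CloudFormation stack status string onto one label:

def classify(stack_status):
    # return the bucket a given stack status falls into
    for label, states in [('failed', FAILED_STACK_STATES),
                          ('complete', COMPLETE_STACK_STATES),
                          ('rolled_back', ROLLBACK_STACK_STATES),
                          ('in_progress', IN_PROGRESS_STACK_STATES)]:
        if stack_status in states:
            return label
    return 'unknown'

assert classify('CREATE_COMPLETE') == 'complete'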
13dfb4f7d4972edbc7ccc0e4f62ea3db1a5b16f4
Add ctx module
mindriot101/srw
srw/ctx.py
srw/ctx.py
def wd2jd(wd):
    jd_ref = 2453005.5
    return (wd / 86400.) + jd_ref
mit
Python
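The conversion above is linear: wd counts seconds relative to the reference Julian date 2453005.5, so 86400 seconds advance the result by exactly one day:

assert wd2jd(0) == 2453005.5
assert wd2jd(86400) == 2453006.5   # 86400 s / 86400 s per day = 1 day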
19acf7ad2a14b71f672d02cb8cb47a4393665bc7
Add benchmarks
sgillies/geometry-collections
bin/serialize.py
bin/serialize.py
"""Timing serializtion of deeply nested geometry collections. To and from JSON using dumps and loads from Python's json module. I'm happy to report that writing such GeoJSON geometry collections is more expensive than parsing them and, at least for Python, deeply nested geometry collections aren't an asymmetric attack vector. """ from json import dumps, loads import timeit geom = {'type': 'Point', 'coordinates': [0.0, 0.0]} for i in range(100): geom = {'type': 'GeometryCollection', 'geometries': [geom]} text = dumps(geom) # Time dumps. print("Dumps") print( timeit.timeit( "dumps(geom)", setup="from __main__ import dumps, geom", number=10000)) # Time loads. print("Loads") print( timeit.timeit( "loads(text)", setup="from __main__ import loads, text", number=10000))
mit
Python
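For orientation (not from the commit), the loop wraps the point 100 levels deep; at depth 2 the structure already looks like this:

nested = {'type': 'GeometryCollection',
          'geometries': [{'type': 'GeometryCollection',
                          'geometries': [{'type': 'Point',
                                          'coordinates': [0.0, 0.0]}]}]}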
7c9b97a81d4c8e41ce81cc881d30323dfb1f9c72
Add layer normalization
ronekko/chainer,wkentaro/chainer,wkentaro/chainer,anaruse/chainer,jnishi/chainer,chainer/chainer,tkerola/chainer,hvy/chainer,cupy/cupy,kashif/chainer,niboshi/chainer,ktnyt/chainer,ktnyt/chainer,chainer/chainer,wkentaro/chainer,rezoo/chainer,ysekky/chainer,jnishi/chainer,ktnyt/chainer,okuta/chainer,okuta/chainer,pfnet/chainer,cupy/cupy,niboshi/chainer,wkentaro/chainer,niboshi/chainer,hvy/chainer,chainer/chainer,chainer/chainer,delta2323/chainer,okuta/chainer,hvy/chainer,jnishi/chainer,niboshi/chainer,kiyukuta/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,jnishi/chainer,cupy/cupy,hvy/chainer,keisuke-umezawa/chainer,okuta/chainer,cupy/cupy,ktnyt/chainer,aonotas/chainer
chainer/links/normalization/layer_normalization.py
chainer/links/normalization/layer_normalization.py
from chainer import functions
from chainer import initializers
from chainer import link
from chainer import links


class LayerNormalization(link.Chain):

    """Layer normalization layer on outputs of linear functions.

    This is a link of "Layer Normalization". This layer normalizes, scales
    and shifts input units with :link:`~chainer.links.Scale`.

    Args:
        size (int): Size of input units.

    See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
    """

    def __init__(self, size, eps=1e-6, initial_gamma=None, initial_beta=None):
        super(LayerNormalization, self).__init__(
            scale=links.Scale(axis=1, W_shape=(size, ), bias_term=True),
        )
        if initial_gamma is None:
            initial_gamma = initializers.One()
        initializers.init_weight(self.scale.W.data, initial_gamma)
        if initial_beta is None:
            initial_beta = initializers.Zero()
        initializers.init_weight(self.scale.bias.b.data, initial_beta)
        self.eps = eps

    def normalize(self, x):
        size = x.shape[1]
        mean = functions.broadcast_to(
            (functions.sum(x, axis=1) / size)[:, None],
            x.shape)
        std = functions.broadcast_to(functions.sqrt(
            functions.sum(functions.square(x - mean), axis=1) / size)[:, None],
            x.shape) + self.eps
        return (x - mean) / std

    def __call__(self, x):
        return self.scale(self.normalize(x))
mit
Python
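The normalize method above is per-row standardisation; the same arithmetic in plain numpy (a sketch for intuition, not Chainer code) is:

import numpy as np

x = np.random.randn(4, 8).astype(np.float32)   # (batch, units)
mean = x.mean(axis=1, keepdims=True)
std = np.sqrt(((x - mean) ** 2).mean(axis=1, keepdims=True)) + 1e-6
normed = (x - mean) / std                      # zero mean, unit variance per row
assert np.allclose(normed.mean(axis=1), 0.0, atol=1e-4)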
b88b97c7d56506804fc9eb93ce7074454fc492f3
Add the migration for designations.
hello-base/web,hello-base/web,hello-base/web,hello-base/web
base/apps/people/migrations/0002_auto_20141223_0316.py
base/apps/people/migrations/0002_auto_20141223_0316.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('people', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Designation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=60)),
                ('romanized_name', models.CharField(max_length=60)),
                ('started', models.DateField(db_index=True)),
                ('ended', models.DateField(db_index=True, null=True, blank=True)),
                ('group', models.ForeignKey(related_name=b'designations', to='people.Group')),
            ],
            options={
                'get_latest_by': 'started',
            },
            bases=(models.Model,),
        ),
        migrations.AlterOrderWithRespectTo(
            name='designation',
            order_with_respect_to='group',
        ),
    ]
apache-2.0
Python
f3dbe9bb2aa627b3485c2ed44f889a1bc5463081
Bump to version 3.1.3
delinhabit/django-rest-framework,callorico/django-rest-framework,ajaali/django-rest-framework,wangpanjun/django-rest-framework,antonyc/django-rest-framework,kylefox/django-rest-framework,sbellem/django-rest-framework,kennydude/django-rest-framework,thedrow/django-rest-framework-1,wzbozon/django-rest-framework,AlexandreProenca/django-rest-framework,linovia/django-rest-framework,cyberj/django-rest-framework,ezheidtmann/django-rest-framework,YBJAY00000/django-rest-framework,abdulhaq-e/django-rest-framework,damycra/django-rest-framework,tomchristie/django-rest-framework,elim/django-rest-framework,jness/django-rest-framework,nhorelik/django-rest-framework,adambain-vokal/django-rest-framework,edx/django-rest-framework,vstoykov/django-rest-framework,tomchristie/django-rest-framework,sehmaschine/django-rest-framework,kgeorgy/django-rest-framework,lubomir/django-rest-framework,jpadilla/django-rest-framework,hnarayanan/django-rest-framework,kennydude/django-rest-framework,nryoung/django-rest-framework,James1345/django-rest-framework,uploadcare/django-rest-framework,jness/django-rest-framework,hnakamur/django-rest-framework,wedaly/django-rest-framework,jpulec/django-rest-framework,davesque/django-rest-framework,gregmuellegger/django-rest-framework,jpadilla/django-rest-framework,fishky/django-rest-framework,kennydude/django-rest-framework,nhorelik/django-rest-framework,jness/django-rest-framework,uploadcare/django-rest-framework,kylefox/django-rest-framework,simudream/django-rest-framework,pombredanne/django-rest-framework,sbellem/django-rest-framework,linovia/django-rest-framework,cyberj/django-rest-framework,cheif/django-rest-framework,wedaly/django-rest-framework,jness/django-rest-framework,jpulec/django-rest-framework,raphaelmerx/django-rest-framework,pombredanne/django-rest-framework,sbellem/django-rest-framework,hnarayanan/django-rest-framework,kezabelle/django-rest-framework,simudream/django-rest-framework,nryoung/django-rest-framework,damycra/django-rest-framework,uruz/django-rest-framework,krinart/django-rest-framework,tomchristie/django-rest-framework,sehmaschine/django-rest-framework,werthen/django-rest-framework,maryokhin/django-rest-framework,HireAnEsquire/django-rest-framework,rhblind/django-rest-framework,jtiai/django-rest-framework,cyberj/django-rest-framework,adambain-vokal/django-rest-framework,ambivalentno/django-rest-framework,ebsaral/django-rest-framework,tcroiset/django-rest-framework,rafaelcaricio/django-rest-framework,mgaitan/django-rest-framework,zeldalink0515/django-rest-framework,leeahoward/django-rest-framework,rafaelcaricio/django-rest-framework,wzbozon/django-rest-framework,arpheno/django-rest-framework,agconti/django-rest-framework,johnraz/django-rest-framework,edx/django-rest-framework,nryoung/django-rest-framework,qsorix/django-rest-framework,wedaly/django-rest-framework,wangpanjun/django-rest-framework,jpulec/django-rest-framework,hunter007/django-rest-framework,callorico/django-rest-framework,wedaly/django-rest-framework,hnakamur/django-rest-framework,leeahoward/django-rest-framework,kylefox/django-rest-framework,wwj718/django-rest-framework,brandoncazander/django-rest-framework,waytai/django-rest-framework,xiaotangyuan/django-rest-framework,rhblind/django-rest-framework,jpadilla/django-rest-framework,raphaelmerx/django-rest-framework,arpheno/django-rest-framework,kgeorgy/django-rest-framework,ashishfinoit/django-rest-framework,douwevandermeij/django-rest-framework,waytai/django-rest-framework,rafaelcaricio/django-rest-framework,uruz/django-rest-framework,elim/django-rest-framework,agconti/django-rest-framework,buptlsl/django-rest-framework,sheppard/django-rest-framework,bluedazzle/django-rest-framework,VishvajitP/django-rest-framework,tigeraniya/django-rest-framework,YBJAY00000/django-rest-framework,alacritythief/django-rest-framework,hunter007/django-rest-framework,alacritythief/django-rest-framework,akalipetis/django-rest-framework,werthen/django-rest-framework,dmwyatt/django-rest-framework,buptlsl/django-rest-framework,davesque/django-rest-framework,tigeraniya/django-rest-framework,elim/django-rest-framework,alacritythief/django-rest-framework,tcroiset/django-rest-framework,ebsaral/django-rest-framework,rhblind/django-rest-framework,paolopaolopaolo/django-rest-framework,ambivalentno/django-rest-framework,aericson/django-rest-framework,hnarayanan/django-rest-framework,zeldalink0515/django-rest-framework,zeldalink0515/django-rest-framework,xiaotangyuan/django-rest-framework,atombrella/django-rest-framework,xiaotangyuan/django-rest-framework,justanr/django-rest-framework,jtiai/django-rest-framework,brandoncazander/django-rest-framework,adambain-vokal/django-rest-framework,wwj718/django-rest-framework,James1345/django-rest-framework,akalipetis/django-rest-framework,tigeraniya/django-rest-framework,linovia/django-rest-framework,fishky/django-rest-framework,qsorix/django-rest-framework,dmwyatt/django-rest-framework,mgaitan/django-rest-framework,edx/django-rest-framework,justanr/django-rest-framework,krinart/django-rest-framework,sehmaschine/django-rest-framework,davesque/django-rest-framework,wangpanjun/django-rest-framework,ossanna16/django-rest-framework,atombrella/django-rest-framework,wzbozon/django-rest-framework,ambivalentno/django-rest-framework,aericson/django-rest-framework,ebsaral/django-rest-framework,kennydude/django-rest-framework,arpheno/django-rest-framework,jtiai/django-rest-framework,kylefox/django-rest-framework,abdulhaq-e/django-rest-framework,ossanna16/django-rest-framework,canassa/django-rest-framework,gregmuellegger/django-rest-framework,leeahoward/django-rest-framework,mgaitan/django-rest-framework,kgeorgy/django-rest-framework,paolopaolopaolo/django-rest-framework,rubendura/django-rest-framework,thedrow/django-rest-framework-1,ajaali/django-rest-framework,fishky/django-rest-framework,douwevandermeij/django-rest-framework,sbellem/django-rest-framework,AlexandreProenca/django-rest-framework,yiyocx/django-rest-framework,tomchristie/django-rest-framework,iheitlager/django-rest-framework,waytai/django-rest-framework,lubomir/django-rest-framework,AlexandreProenca/django-rest-framework,ossanna16/django-rest-framework,YBJAY00000/django-rest-framework,potpath/django-rest-framework,vstoykov/django-rest-framework,HireAnEsquire/django-rest-framework,uploadcare/django-rest-framework,pombredanne/django-rest-framework,potpath/django-rest-framework,vstoykov/django-rest-framework,nhorelik/django-rest-framework
rest_framework/__init__.py
rest_framework/__init__.py
""" ______ _____ _____ _____ __ | ___ \ ___/ ___|_ _| / _| | | | |_/ / |__ \ `--. | | | |_ _ __ __ _ _ __ ___ _____ _____ _ __| |__ | /| __| `--. \ | | | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ / | |\ \| |___/\__/ / | | | | | | | (_| | | | | | | __/\ V V / (_) | | | < \_| \_\____/\____/ \_/ |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_| """ __title__ = 'Django REST framework' __version__ = '3.1.3' __author__ = 'Tom Christie' __license__ = 'BSD 2-Clause' __copyright__ = 'Copyright 2011-2015 Tom Christie' # Version synonym VERSION = __version__ # Header encoding (see RFC5987) HTTP_HEADER_ENCODING = 'iso-8859-1' # Default datetime input and output formats ISO_8601 = 'iso-8601'
""" ______ _____ _____ _____ __ | ___ \ ___/ ___|_ _| / _| | | | |_/ / |__ \ `--. | | | |_ _ __ __ _ _ __ ___ _____ _____ _ __| |__ | /| __| `--. \ | | | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ / | |\ \| |___/\__/ / | | | | | | | (_| | | | | | | __/\ V V / (_) | | | < \_| \_\____/\____/ \_/ |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_| """ __title__ = 'Django REST framework' __version__ = '3.1.2' __author__ = 'Tom Christie' __license__ = 'BSD 2-Clause' __copyright__ = 'Copyright 2011-2015 Tom Christie' # Version synonym VERSION = __version__ # Header encoding (see RFC5987) HTTP_HEADER_ENCODING = 'iso-8859-1' # Default datetime input and output formats ISO_8601 = 'iso-8601'
bsd-2-clause
Python