commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
c657d92f1f8dc3cd4ff9995dc0d2857ce8f6fdd4 | Create CountingBits.py | Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/codi,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/codirit,Chasego/cod,Chasego/codirit,cc13ny/algo,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/cod,cc13ny/Allin,Chasego/cod,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/cod | leetcode/338-Counting-Bits/CountingBits.py | leetcode/338-Counting-Bits/CountingBits.py | class Solution(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
seed = 1
res = [0]
while num > 0:
res += [res[i] + 1 for i in xrange(min(num, seed))]
num -= seed
seed = seed << 1
return res
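# Illustrative check (Python 2, given the use of xrange):
#   Solution().countBits(5) -> [0, 1, 1, 2, 1, 2]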
| mit | Python |
|
0cd5ed79f019db91261c0d858b61796021ec3f80 | Add syntax highlighting tests for PEP 570 | tree-sitter/tree-sitter-python,tree-sitter/tree-sitter-python,tree-sitter/tree-sitter-python,tree-sitter/tree-sitter-python,tree-sitter/tree-sitter-python | test/highlight/parameters.py | test/highlight/parameters.py | def g(h, i, /, j, *, k=100, **kwarg):
# ^ operator
# ^ operator
pass
| mit | Python |
|
a85f0df776da6c8f39e5d5dbb91370531e4605be | Create GUI-Main.py | ReallyGoodPie/Python-Image-Resizer | GUI-Main.py | GUI-Main.py | #!/usr/bin/env python
import tkinter as tk
import sys, glob, time
from tkinter.filedialog import *
from PIL import Image
_imaging = Image.core
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.input_directory = StringVar()
self.output_directory = StringVar()
        self.progress = StringVar()
self.width, self.height = StringVar(), StringVar()
self.createWidgets()
self.variable_dictionary = {}
def createWidgets(self):
self.input_directory.set("No Image Direcotry Selected")
self.output_directory.set("No Output Directory Selected")
self.file_Label = tk.Label(self, textvariable=self.input_directory)
self.file_Label.grid(row=0, column=0)
self.file_button = tk.Button(self, text="Open Folder", command = lambda:self.selectFolder("Input"))
self.file_button.grid(row=0, column=1)
self.output_label = tk.Label(self, textvariable=self.output_directory)
self.output_label.grid(row=1, column=0)
self.output_button = tk.Button(self, text = "Select Folder", command = lambda:self.selectFolder("Output"))
self.output_button.grid(row=1, column=1)
self.width_label = tk.Label(self, text="Width (In Pixels): ")
self.width_label.grid(row=2, column=0)
self.width_entry = tk.Entry(self, textvariable=self.width)
self.width_entry.grid(row=2, column=1)
self.height_label = tk.Label(self, text="Height (In Pixels): ")
self.height_label.grid(row=3, column=0)
self.height_entry = tk.Entry(self, textvariable=self.height)
self.height_entry.grid(row=3, column=1)
self.resize_button = tk.Button(self, text="Resize Images", command=lambda:self.resizeImages(self.variable_dictionary["Input"], self.variable_dictionary["Output"]))
self.resize_button.grid(row=4, column=0)
def resizeImages(self, folder, output):
folder = str(folder)
output = str(output)
all_images = glob.glob(folder + "/*.png") + glob.glob(folder + "/*.jpg") + glob.glob(folder + "/*.gif") + glob.glob(folder + "/*.bmp")
print(all_images)
image_count = 0
for x in all_images:
image_count += 1
total_images = image_count
self.progress.set("0/" + str(total_images) + " Images Resized.")
self.status_label = tk.Label(self, textvariable = self.progress)
self.status_label.grid(row=4, column=1)
try:
image_count = 0
size = int(self.width.get()), int(self.height.get())
for image in all_images:
image_count += 1
                current_image = Image.open(image)
                current_image.thumbnail(size, Image.ANTIALIAS)
                current_image.save(output + "/" + str(image_count) + ".JPEG")
self.progress.set( str(image_count) + "/" + str(total_images) + " Images Resized. Resizing: " + image)
except:
print("Failed To Resize Images! ")
def selectFolder(self, name):
self.variable_dictionary[name] = askdirectory(title="Choose The Appropriate Folder")
if name == "Input":
self.input_directory.set(self.variable_dictionary[name])
else:
self.output_directory.set(self.variable_dictionary[name])
if __name__ == "__main__":
app = Application()
app.master.title("Image Resizer")
app.mainloop()
| apache-2.0 | Python |
|
bf6c8ce59ec841b19dab3a02a9065864035d4d82 | Add a new helper to convert stackalytics default_data.json | morucci/repoxplorer,morucci/repoxplorer,morucci/repoxplorer,morucci/repoxplorer | bin/helpers/openstack/stackalytics.py | bin/helpers/openstack/stackalytics.py | import sys
import json
import yaml
import datetime
# Read default_data.json from stackalytics/etc/ and convert for
# repoXplorer.
if __name__ == "__main__":
ident = {'identities': {},
'groups': {}}
data = json.loads(file(sys.argv[1]).read())
users = data['users']
groups = data['companies']
i = ident['identities']
g = ident['groups']
gstore = {}
for group in groups:
gstore[group['company_name']] = group['domains']
for user in users:
try:
i[user['launchpad_id']] = {}
iu = i[user['launchpad_id']]
except:
try:
i[user['github_id']] = {}
iu = i[user['github_id']]
except:
continue
sys.stdout.write('.')
iu['name'] = user['user_name']
iu['default-email'] = user['emails'][0]
iu['emails'] = {}
for email in user['emails']:
iu['emails'].setdefault(email, {})
histo = []
for c in user['companies']:
iu['emails'][email].setdefault('groups', {})
iu['emails'][email]['groups'][c['company_name']] = {}
# cd = iu['emails'][email]['groups'][c['company_name']]
g.setdefault(
c['company_name'], {
'description': '',
'emails': {},
'domains': gstore.get(c['company_name'], [])
})
if c['end_date'] is not None:
end_date_raw = datetime.datetime.strptime(
c['end_date'], '%Y-%b-%d')
histo.append([None, end_date_raw, c['company_name']])
else:
histo.append([None, None, c['company_name']])
histo.sort(key=lambda tup: tup[1] or datetime.datetime.today())
for z, h in enumerate(histo):
                if z > 0:
                    h[0] = histo[z-1][1]
cd = iu['emails'][email]['groups'][h[2]]
if h[0]:
cd['begin-date'] = h[0].strftime('%Y-%m-%d')
if h[1]:
cd['end-date'] = h[1].strftime('%Y-%m-%d')
path = 'test.yaml'
with open(path, 'w') as fd:
fd.write(yaml.safe_dump(ident,
default_flow_style=False))
| apache-2.0 | Python |
|
367a1ff9f0ca3daae3ee804b5484e3863bb72307 | Add initial proposal tests | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | tests/views/test_proposal.py | tests/views/test_proposal.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for proposal view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
from tests.timeline_utils import TimelineHelper
# TODO: perhaps we should move this out?
from soc.modules.gsoc.models.proposal import GSoCProposal
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class ProposalTest(DjangoTestCase):
"""Tests proposal page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.models.organization import GSoCOrganization
properties = {'status': 'visible', 'apps_tasks_limit': 20}
self.gsoc = seeder_logic.seed(GSoCProgram, properties=properties)
properties = {'scope': self.gsoc, 'status': 'active'}
self.org = seeder_logic.seed(GSoCOrganization, properties=properties)
self.timeline = TimelineHelper(self.gsoc.timeline)
self.data = GSoCProfileHelper(self.gsoc)
def assertProposalTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/proposal/base.html')
self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')
def testSubmitProposal(self):
self.data.createStudent()
self.timeline.studentSignup()
url = '/gsoc/proposal/submit/' + self.org.key().name()
response = self.client.get(url)
self.assertProposalTemplatesUsed(response)
# test POST
override = {'program': self.gsoc, 'score': 0, 'mentor': None, 'org': self.org, 'status': 'new'}
properties = seeder_logic.seed_properties(GSoCProposal, properties=override)
postdata = properties.copy()
postdata['xsrf_token'] = self.getXsrfToken(url)
response = self.client.post(url, postdata)
self.assertResponseRedirect(response)
# TODO(SRabbelier): verify
proposal = GSoCProposal.all().get()
self.assertPropertiesEqual(properties, proposal)
| apache-2.0 | Python |
|
ee4ab0cf3ef08459e1a8ad1cdae370870ba28805 | Create lc1755.py | FiveEye/ProblemSet,FiveEye/ProblemSet | LeetCode/lc1755.py | LeetCode/lc1755.py | class Solution:
def minAbsDifference(self, nums: List[int], goal: int) -> int:
n = len(nums)
nums.sort(key=lambda x: -abs(x))
neg = [0 for _ in range(n+1)]
pos = [0 for _ in range(n+1)]
for i in range(n-1, -1, -1):
if nums[i] < 0:
neg[i] = neg[i+1] + nums[i]
pos[i] = pos[i+1]
else:
pos[i] = pos[i+1] + nums[i]
neg[i] = neg[i+1]
# print(nums, pos, neg)
ans = abs(goal)
s = set([0])
def check(a, b):
if b < goal - ans or goal + ans < a:
return False
return True
for i in range(n):
s = set([x for x in s if check(x+neg[i], x+pos[i])])
# print(s)
t = set()
for x in s:
y = x + nums[i]
if abs(y - goal) < ans:
ans = abs(y - goal)
t.add(y)
s |= t
return ans
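# Illustrative check:
#   Solution().minAbsDifference([5, -7, 3, 5], 6) -> 0, since 5 - 7 + 3 + 5 == 6.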
| mit | Python |
|
238d031651cb74d0ca9bed9d38cda836049c9c37 | Correct fallback for tag name | daevaorn/sentry,gencer/sentry,zenefits/sentry,beeftornado/sentry,imankulov/sentry,BuildingLink/sentry,looker/sentry,looker/sentry,fotinakis/sentry,JamesMura/sentry,mvaled/sentry,alexm92/sentry,ifduyue/sentry,jean/sentry,beeftornado/sentry,fotinakis/sentry,BuildingLink/sentry,ifduyue/sentry,alexm92/sentry,BuildingLink/sentry,fotinakis/sentry,nicholasserra/sentry,BuildingLink/sentry,JackDanger/sentry,JamesMura/sentry,imankulov/sentry,jean/sentry,nicholasserra/sentry,zenefits/sentry,zenefits/sentry,beeftornado/sentry,ifduyue/sentry,mvaled/sentry,zenefits/sentry,gencer/sentry,looker/sentry,ifduyue/sentry,daevaorn/sentry,gencer/sentry,mvaled/sentry,mvaled/sentry,daevaorn/sentry,JackDanger/sentry,JamesMura/sentry,mvaled/sentry,BuildingLink/sentry,zenefits/sentry,mitsuhiko/sentry,imankulov/sentry,daevaorn/sentry,jean/sentry,JamesMura/sentry,gencer/sentry,mitsuhiko/sentry,mvaled/sentry,jean/sentry,fotinakis/sentry,ifduyue/sentry,nicholasserra/sentry,jean/sentry,JackDanger/sentry,looker/sentry,looker/sentry,alexm92/sentry,JamesMura/sentry,gencer/sentry | src/sentry/api/serializers/models/grouptagkey.py | src/sentry/api/serializers/models/grouptagkey.py | from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagKey, TagKey
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
def get_attrs(self, item_list, user):
tag_labels = {
t.key: t.get_label()
for t in TagKey.objects.filter(
project=item_list[0].project,
key__in=[i.key for i in item_list]
)
}
result = {}
for item in item_list:
try:
label = tag_labels[item.key]
except KeyError:
if item.key.startswith('sentry:'):
label = item.key.split('sentry:', 1)[-1]
else:
label = item.key
result[item] = {
'name': label,
}
return result
def serialize(self, obj, attrs, user):
if obj.key.startswith('sentry:'):
key = obj.key.split('sentry:', 1)[-1]
else:
key = obj.key
return {
'name': attrs['name'],
'key': key,
'uniqueValues': obj.values_seen,
}
| from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagKey, TagKey
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
def get_attrs(self, item_list, user):
tag_labels = {
t.key: t.get_label()
for t in TagKey.objects.filter(
project=item_list[0].project,
key__in=[i.key for i in item_list]
)
}
result = {}
for item in item_list:
try:
label = tag_labels[item.key]
except KeyError:
label = item.value
result[item] = {
'name': label,
}
return result
def serialize(self, obj, attrs, user):
if obj.key.startswith('sentry:'):
key = obj.key.split('sentry:', 1)[-1]
else:
key = obj.key
return {
'name': attrs['name'],
'key': key,
'uniqueValues': obj.values_seen,
}
| bsd-3-clause | Python |
bc3495acdc9f53e2fa7d750f3dd7bb53826326e3 | Create csvloader.py | taygetea/scatterplot-visualizer | csvloader.py | csvloader.py | import random
import csv
with open('points.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
row = []
for i in range(1000):
row.append(random.randrange(-2000,1000))
row.append(random.randrange(20,1000))
row.append(random.randrange(0,3))
writer.writerow(row)
row = []
| mit | Python |
|
8fe27d56592978a0d2a2e43b07214f982bad2010 | Add intermediate tower 8 | arbylee/python-warrior | pythonwarrior/towers/intermediate/level_008.py | pythonwarrior/towers/intermediate/level_008.py | # -------
# |@ Ss C>|
# -------
level.description("You discover a satchel of bombs which will help "
"when facing a mob of enemies.")
level.tip("Detonate a bomb when you see a couple enemies ahead of "
"you (warrior.look()). Watch out for your health too.")
level.clue("Calling warrior.look() will return an array of Spaces. If the "
"first two contain enemies, detonate a bomb with "
"warrior.detonate_().")
level.time_bonus(30)
level.size(7, 1)
level.stairs(6, 0)
def add_war_abilities(warrior):
warrior.add_abilities('look', 'detonate_')
level.warrior(0, 0, 'east', func=add_war_abilities)
def add_captive_abilities(unit):
unit.add_abilities('explode_')
unit.abilities_attr['explode_'].time = 9
level.unit('captive', 5, 0, 'west', func=add_captive_abilities)
level.unit('thick_sludge', 2, 0, 'west')
level.unit('sludge', 3, 0, 'west')
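# A minimal player sketch for this level (illustrative only; is_enemy and
# walk_ are assumed ability names, not defined in this file):
#
# class Player(object):
#     def play_turn(self, warrior):
#         spaces = warrior.look()
#         if spaces[0].is_enemy() and spaces[1].is_enemy():
#             warrior.detonate_()
#         else:
#             warrior.walk_()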
| mit | Python |
|
5e9c0961c381dcebe0331c8b0db38794de39300b | Initialize P01_fantasy_game_inventory | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter05/PracticeProjects/P01_fantasy_game_inventory.py | books/AutomateTheBoringStuffWithPython/Chapter05/PracticeProjects/P01_fantasy_game_inventory.py | # This program models a player's inventory from a fantasy game
# You are creating a fantasy video game. The data structure to model the player’s
# inventory will be a dictionary where the keys are string values describing the item
# in the inventory and the value is an integer value detailing how many of that item
# the player has.
#
# For example, the dictionary value
# {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
# means the player has 1 rope, 6 torches, 42 gold coins, and so on.
#
# Write a function named displayInventory() that would take any possible “inventory”
# and display it like the following:
# Inventory:
# 12 arrow
# 42 gold coin
# 1 rope
# 6 torch
# 1 dagger
#
# Total number of items: 62
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
def displayInventory(inventory):
print("Inventory:")
item_total = 0
for k, v in inventory.items():
        print(str(v) + ' ' + k)
        item_total += v
print("Total number of items: " + str(item_total))
displayInventory(stuff)
| mit | Python |
|
b50811f87d10dab0768feed293e239ca98a91538 | fix issue with ptu server and morse topic by correcting and republishing /ptu/state | bfalacerda/strands_apps,strands-project/strands_apps,strands-project/strands_apps,strands-project/strands_apps,bfalacerda/strands_apps,bfalacerda/strands_apps | topic_republisher/scripts/republish_ptu_state.py | topic_republisher/scripts/republish_ptu_state.py | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import JointState
class JointStateRepublisher():
"A class to republish joint state information"
def __init__(self):
rospy.init_node('ptu_state_republisher')
self.pub = rospy.Publisher('/ptu/state', JointState)
rospy.Subscriber("/ptu_state", JointState, self.callback)
rospy.loginfo(rospy.get_name() + " setting up")
def callback(self,data):
rospy.logdebug(rospy.get_name() + ": I heard %s, %s", data.name, data.position)
pan_idx = data.name.index('pan')
tilt_idx = data.name.index('tilt')
js = JointState()
js.header = data.header
js.name.append(data.name[pan_idx])
js.name.append(data.name[tilt_idx])
js.position.append(data.position[pan_idx])
js.position.append(data.position[tilt_idx])
self.pub.publish(js)
if __name__ == '__main__':
republisher = JointStateRepublisher()
rospy.spin()
| mit | Python |
|
007b2d2ce61864e87de368e508fa971864847fc7 | Create findPrimes.py | TylerWitt/py | findPrimes.py | findPrimes.py | # Tyler Witt
# findPrimes.py
# 6.27.14
# ver 1.0
# This function implements the Sieve of Eratosthenes algorithm to find all the prime numbers below lim
def findPrimes(lim):
primes = []
cur = 0
if lim < 2:
return None
for num in range(2, lim + 1):
primes.append(num)
while (primes[cur] ** 2 < lim):
for val in primes:
if val % primes[cur] == 0 and val != primes[cur]:
primes.remove(val)
cur += 1
return (primes)
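# Illustrative check:
#   findPrimes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]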
| mit | Python |
|
80f294e134ef684feb8ac700747a65522edf8758 | add new example in the gallery | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | examples/plot_kraken.py | examples/plot_kraken.py | """
Kraken module example
=======================
kraken module showing distribution of the most frequent taxons
Please see :mod:`sequana.kraken` for more information and the
quality_taxon pipeline module or kraken rule.
"""
# This plots a simple taxonomic representation of the output
# of the taxonomic pipeline. A more complete and interactive
# representation using krona is available when using the
# quality_taxon pipeline in Sequana.
##############################################
# test
from sequana import KrakenContaminant
k = KrakenContaminant("kraken.out", verbose=False)
k.plot(kind='pie')
####################################################
# The input file **kraken.out** is the output of the
# Kraken tool. It is a ste of rows such as those ones::
#
# C HISEQ:426:C5T65ACXX:5:2301:5633:7203 11234 203 0:2 11234:1 0:1 11234:1 0:2 11234:1 0:13 11234:1 0:1 11234:1 0:3 11234:1 0:16 11234:1 0:5 11234:1 0:6 11234:1 0:13 A:31 0:33 11234:1 0:29 11234:1 0:7
# C HISEQ:426:C5T65ACXX:5:2301:5815:7120 11234 203 0:4 11234:1 0:12 11234:1 0:22 11234:1 0:1 0 11234:1 0:5 11234:1 0:7 11234:1 0:5 A:31 0:3 11234:1 0:22 11234:1 0:18 11234:1 0:24 11234:1
#
#
# The KrakenContaminant class will read the file, download a taxonomic database
# from EBI, map the taxon found in the **kraken.out** file and figure out the
# lineage. In the example above, only the scientific name is found. In the
# snakefile provided in Sequana, the full pipeline produces a full lineage
# representation using krona tool.
#
# .. seealso:: :ref:`pipelines`
| bsd-3-clause | Python |
|
341ca75484b4607eb632d52bf257c8190ebf8a3b | Create fishspine3.py | mattdavie/FishSpine | fishspine3.py | fishspine3.py | #Fish vertebral location code
| cc0-1.0 | Python |
|
653ab8128de3c08b6b8be0d662f12ef5a3edf6b2 | Add grafana build rule | clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage | shipyard/rules/third-party/grafana/build.py | shipyard/rules/third-party/grafana/build.py | from foreman import get_relpath, rule
from garage import scripts
from templates.common import define_distro_packages
GRAFANA_DEB = 'grafana_5.1.4_amd64.deb'
GRAFANA_DEB_URI = 'https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.4_amd64.deb'
GRAFANA_DEB_CHECKSUM = 'sha256-bbec4cf6112c4c2654b679ae808aaad3b3e4ba39818a6d01f5f19e78946b734e'
define_distro_packages([
'adduser',
'libfontconfig',
])
@rule
@rule.depend('//base:build')
@rule.depend('install_packages')
def build(parameters):
drydock_src = parameters['//base:drydock'] / get_relpath()
scripts.mkdir(drydock_src)
with scripts.directory(drydock_src):
deb_path = drydock_src / GRAFANA_DEB
if not deb_path.exists():
scripts.wget(GRAFANA_DEB_URI, deb_path)
scripts.ensure_checksum(deb_path, GRAFANA_DEB_CHECKSUM)
with scripts.using_sudo():
scripts.execute(['dpkg', '--install', deb_path])
@rule
@rule.depend('build')
@rule.reverse_depend('//base:tapeout')
def tapeout(parameters):
with scripts.using_sudo():
rootfs = parameters['//base:drydock/rootfs']
scripts.rsync(
[
'/usr/sbin/grafana-server',
'/usr/share/grafana',
],
rootfs,
relative=True,
)
@rule
@rule.depend('//base:tapeout')
def trim_usr(parameters):
rootfs = parameters['//base:drydock/rootfs']
with scripts.using_sudo():
scripts.rm(rootfs / 'usr/lib', recursive=True)
scripts.rm(rootfs / 'usr/local/lib', recursive=True)
| mit | Python |
|
4dd0b349f971cd5ba4842f79a7dba36bf4999b6f | Add Jmol package (#3041) | skosukhin/spack,EmreAtes/spack,LLNL/spack,lgarren/spack,TheTimmy/spack,matthiasdiener/spack,TheTimmy/spack,lgarren/spack,krafczyk/spack,krafczyk/spack,skosukhin/spack,TheTimmy/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,LLNL/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,lgarren/spack,lgarren/spack,EmreAtes/spack,mfherbst/spack,iulian787/spack,skosukhin/spack,mfherbst/spack,mfherbst/spack,tmerrick1/spack,krafczyk/spack,iulian787/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,LLNL/spack,tmerrick1/spack,TheTimmy/spack,LLNL/spack,lgarren/spack,EmreAtes/spack,skosukhin/spack,matthiasdiener/spack,EmreAtes/spack,skosukhin/spack,LLNL/spack,tmerrick1/spack,TheTimmy/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/jmol/package.py | var/spack/repos/builtin/packages/jmol/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from distutils.dir_util import copy_tree
class Jmol(Package):
"""Jmol: an open-source Java viewer for chemical structures in 3D
with features for chemicals, crystals, materials and biomolecules."""
homepage = "http://jmol.sourceforge.net/"
url = "https://sourceforge.net/projects/jmol/files/Jmol/Version%2014.8/Jmol%2014.8.0/Jmol-14.8.0-binary.tar.gz"
version('14.8.0', '3c9f4004b9e617ea3ea0b78ab32397ea')
depends_on('jdk', type='run')
def install(self, spec, prefix):
copy_tree('jmol-{0}'.format(self.version), prefix)
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH', self.prefix)
run_env.set('JMOL_HOME', self.prefix)
| lgpl-2.1 | Python |
|
dc635babcf78343bf9490a77d716db89bda2698b | Create __init__.py | chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/Loocat.cc,chidaobanjiu/Loocat.cc,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/Flask_Web,chidaobanjiu/Loocat.cc,chidaobanjiu/MANA2077 | api_1_0/__init__.py | api_1_0/__init__.py | from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, posts, users, comments, errors
| mit | Python |
|
7d29c44e19c1f06deb0722a3df51501b39566c4b | Implement simple en/decoding command line tool | fritz0705/flynn | flynn/tool.py | flynn/tool.py | # coding: utf-8
import sys
import argparse
import flynn
import json
def main(args=sys.argv[1:]):
formats = {"json", "cbor", "cbori", "cborh", "cborhi"}
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--input-format", choices=formats, default="cbor")
argparser.add_argument("-o", "--output-format", choices=formats, default="cbor")
args = argparser.parse_args(args)
if args.input_format in {"cbor", "cbori"}:
input_format = "cbor"
else:
input_format = args.input_format
output_format = args.output_format
intermediate = None
if input_format in {"cbor", "cbori"}:
intermediate = flynn.load(sys.stdin.raw)
elif input_format in {"cborh", "cborhi"}:
intermediate = flynn.loadh(sys.stdin.read())
elif input_format == "json":
intermediate = json.load(sys.stdin)
if output_format == "cbor":
flynn.dump(intermediate, sys.stdout.raw)
elif output_format == "cbori":
flynn.dump(intermediate, sys.stdout.raw, cls=flynn.encoder.InfiniteEncoder)
elif output_format == "cborh":
sys.stdout.write(flynn.dumph(intermediate))
elif output_format == "cborhi":
sys.stdout.write(flynn.dumph(intermediate, cls=flynn.encoder.InfiniteEncoder))
elif output_format == "json":
json.dump(intermediate, sys.stdout)
if __name__ == "__main__":
main()
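# Illustrative usage (assuming the package is importable as flynn.tool):
#   echo '{"a": 1}' | python -m flynn.tool -i json -o cborh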
| mit | Python |
|
5af92f3905f2d0101eeb42ae7cc51bff528ea6ea | Write bodies given by coordinates to a VTK file | jni/synapse-geometry,janelia-flyem/synapse-geometry | syngeo/io.py | syngeo/io.py | # stardard library
import sys, os
# external libraries
import numpy as np
from ray import imio, evaluate
def add_anything(a, b):
return a + b
def write_synapse_to_vtk(neurons, coords, fn, im=None, t=(2,0,1), s=(1,-1,1),
margin=None):
"""Output neuron shapes around pre- and post-synapse coordinates.
The coordinate array is a (n+1) x m array, where n is the number of
post-synaptic sites (fly neurons are polyadic) and m = neurons.ndim, the
number of dimensions of the image.
"""
neuron_ids = neurons[zip(*(coords[:,t]*s))]
synapse_volume = reduce(add_anything,
[(i+1)*(neurons==j) for i, j in enumerate(neuron_ids)])
imio.write_vtk(synapse_volume, fn)
if im is not None:
imio.write_vtk(im,
os.path.join(os.path.dirname(fn), 'image.' + os.path.basename(fn)))
| bsd-3-clause | Python |
|
e6e90cef36551796f7fb06585c67508538ce113f | Create MaxCounters.py | IshwarBhat/codility | Counting-Elements/MaxCounters.py | Counting-Elements/MaxCounters.py | # https://codility.com/demo/results/trainingTC7JSX-8E9/
def solution(N, A):
counters = N * [0]
max_counters = 0
for elem in A:
if elem == N+1:
counters = N * [max_counters]
else:
this_elem = counters[elem-1] + 1
counters[elem-1] = this_elem
if this_elem > max_counters:
max_counters = this_elem
return counters
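# Illustrative check with the Codility sample data:
#   solution(5, [3, 4, 4, 6, 1, 4, 4]) -> [3, 2, 2, 4, 2]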
| mit | Python |
|
ce1d13bc6827f780e44491b630e64df7b52634f1 | add vibration sensor code | qszhuan/raspberry-pi,qszhuan/raspberry-pi,qszhuan/raspberry-pi | gpio/vibration-sendor-test.py | gpio/vibration-sendor-test.py | import RPi.GPIO as GPIO
import time
import datetime
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
IN_PIN = 18
LED_PIN = 17
GPIO.setup(IN_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_PIN, GPIO.OUT)
GPIO.output(LED_PIN, GPIO.LOW)
def turn_on(led_pin):
GPIO.output(led_pin, GPIO.HIGH)
def turn_off(led_pin):
GPIO.output(led_pin, GPIO.LOW)
count = 0
while True:
i=GPIO.input(IN_PIN)
if(count == 1000):
turn_off(LED_PIN)
count += 1
if i==1:
print(datetime.datetime.now(), "Vibration detected",i)
time.sleep(0.1)
count = 0
turn_on(LED_PIN)
| mit | Python |
|
e7ef1806f84e6d07ef88ca23444f37cf6f50e014 | Add a console-less version. | mpasternak/wxMailServer | wxMailServer.pyw | wxMailServer.pyw | # -*- encoding: utf-8 -*-
from wxMailServer import main
if __name__ == "__main__":
main() | mit | Python |
|
b419f8c9f562d3d16a6079e949c47ec2adc4c97d | add utility script for merging test files | phate/jive,phate/jive,phate/jive | scripts/merge-tests.py | scripts/merge-tests.py | import sys
c_includes = set()
cxx_includes = set()
jive_includes = set()
local_includes = set()
code_blocks = []
def mangle(fname):
name = fname[6:-2]
name = name.replace('/', '_')
name = name.replace('-', '_')
return name
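# Illustrative (assumes inputs of the form 'tests/<path>.c'):
#   mangle('tests/foo/bar-baz.c') -> 'foo_bar_baz'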
for fname in sys.argv[1:]:
seen_includes = False
code_lines = []
name = mangle(fname)
for line in file(fname).readlines():
line = line[:-1]
if line[:9] == "#include ":
include = line[9:]
if include[:6] == "<jive/":
jive_includes.add(include)
elif include[-3:] == ".h>":
c_includes.add(include)
elif include[0] == '"':
local_includes.add(include)
else:
cxx_includes.add(include)
seen_includes = True
continue
if not seen_includes: continue
line = line + '\n'
if line == '\n' and code_lines and code_lines[-1] == '\n':
continue
line = line.replace('test_main', name)
code_lines.append(line)
code_blocks.append(''.join(code_lines))
out = sys.stdout
if local_includes:
for i in sorted(local_includes): out.write('#include %s\n' % i)
out.write('\n')
if c_includes:
for i in sorted(c_includes): out.write('#include %s\n' % i)
out.write('\n')
if cxx_includes:
for i in sorted(cxx_includes): out.write('#include %s\n' % i)
out.write('\n')
if jive_includes:
for i in sorted(jive_includes): out.write('#include %s\n' % i)
out.write('\n')
for c in code_blocks: out.write(c)
| lgpl-2.1 | Python |
|
62e17c30ba45458254c0da5b14582aeeac9eab4c | Add command to pre-generate all jpeg images | Signbank/BSL-signbank,Signbank/BSL-signbank,Signbank/Auslan-signbank,Signbank/Auslan-signbank,Signbank/Auslan-signbank,Signbank/BSL-signbank,Signbank/Auslan-signbank,Signbank/BSL-signbank | signbank/video/management/commands/makejpeg.py | signbank/video/management/commands/makejpeg.py | """Convert a video file to flv"""
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from signbank.video.models import GlossVideo
import os
class Command(BaseCommand):
help = 'Create JPEG images for all videos'
args = ''
def handle(self, *args, **options):
# just access the poster path for each video
for vid in GlossVideo.objects.all():
p = vid.poster_path()
print p
| bsd-3-clause | Python |
|
83d8199eccf7261a8e2f01f7665537ee31702f8c | Create QNAP_Shellshock.py | atantawy/exploits | QNAP_Shellshock.py | QNAP_Shellshock.py | #!/usr/bin/python
import socket
print "QNAP exploit!"
inputstr=""
ip="x.x.x.x" #Change IP Value
port=8080
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
inputstr=raw_input("cmd> ")
    s.connect((ip, port))
s.send("GET /cgi-bin/index.cgi HTTP/1.0\nHost: "+ip+"\nUser-Agent: () { :;}; echo; "+inputstr+"\r\n\r\n")
output=""
while True:
buf=s.recv(4096)
if not buf:
break
output+=buf
indexHTML= output.find("html")
print output[0:indexHTML]
s.close()
| apache-2.0 | Python |
|
63c2c7a696aedb1b08d2478a2b84aec42f4364cf | Add tests for URLConverter | kseppi/mrs-mapreduce,byu-aml-lab/mrs-mapreduce | tests/bucket/test_url_converter.py | tests/bucket/test_url_converter.py | from mrs.bucket import URLConverter
def test_local_to_global():
c = URLConverter('myhost', 42, '/my/path')
url = c.local_to_global('/my/path/xyz.txt')
assert url == 'http://myhost:42/xyz.txt'
def test_local_to_global_outside_dir():
c = URLConverter('myhost', 42, '/my/path')
url = c.local_to_global('/other/path/xyz.txt')
assert url == '/other/path/xyz.txt'
def test_global_to_local():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http://myhost:42/xyz.txt', master)
assert url == '/my/path/xyz.txt'
def test_global_to_local_other():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http://other:443/xyz.txt', master)
assert url == 'http://other:443/xyz.txt'
def test_global_to_local_master():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http:///xyz.txt', master)
assert url == 'http://server:8080/xyz.txt'
# vim: et sw=4 sts=4
| apache-2.0 | Python |
|
88cf8e30da6ab655dfc31b2fd88d26ef649e127d | add sha digest tool | congminghaoxue/learn_python | getDigest.py | getDigest.py | #!/usr/bin/env python
# encoding: utf-8
import sys
import hashlib
def getDigest(file):
# BUF_SIZE is totally arbitrary, change for your app!
BUF_SIZE = 65536 # lets read stuff in 64kb chunks!
md5 = hashlib.md5()
sha1 = hashlib.sha1()
with open(file, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
sha1.update(data)
print("MD5: {0}".format(md5.hexdigest()))
print("SHA1: {0}".format(sha1.hexdigest()))
def main(argv):
getDigest(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
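# Illustrative usage; prints the MD5 and SHA1 digests of the named file:
#   python getDigest.py some_file.bin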
| apache-2.0 | Python |
|
e9acbc2e1423084ddd4241e2fbdcc7fcbf02ad6d | add empty migration as data migration | rsalmaso/django-fluo-coupons,rsalmaso/django-fluo-coupons | coupons/migrations/0005_auto_20151105_1502.py | coupons/migrations/0005_auto_20151105_1502.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coupons', '0004_auto_20151105_1456'),
]
operations = [
]
| bsd-3-clause | Python |
|
3004dec0e0deadc4df61bafb233cd6b277c9bfef | Add in small utility that creates an index on the MongoDB collection, specifically on the Steam ID number key | mulhod/reviewer_experience_prediction,mulhod/reviewer_experience_prediction | util/create_mongodb_index.py | util/create_mongodb_index.py | #!/usr/env python3.4
import sys
from pymongo import ASCENDING
from util.mongodb import connect_to_db
from argparse import (ArgumentParser,
ArgumentDefaultsHelpFormatter)
def main(argv=None):
parser = ArgumentParser(description='Run incremental learning '
'experiments.',
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('-dbhost', '--mongodb_host',
help='Host that the MongoDB server is running on.',
type=str,
default='localhost')
parser.add_argument('--mongodb_port', '-dbport',
help='Port that the MongoDB server is running on.',
type=int,
default=37017)
args = parser.parse_args()
# Connect to MongoDB database
print('Connecting to MongoDB database at {}:{}...'
.format(args.mongodb_host,
args.mongodb_port),
file=sys.stderr)
db = connect_to_db(args.mongodb_host,
args.mongodb_port)
# Create index on 'steam_id_number' so that cursors can be sorted
# on that particular key
print('Creating index on the "steam_id_number" key...',
file=sys.stderr)
    db.create_index([('steam_id_number', ASCENDING)])
print('Created new index "steam_id_number_1" in reviews '
'collection.',
file=sys.stderr)
| mit | Python |
|
3da9953aa453281fd55ada75b2ed40fce8d9df6c | Create screen_op.py | ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station | screen_op.py | screen_op.py | #-------------------------------------------------------------------------------
#
# Controls shed weather station
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!usr/bin/env python
#===============================================================================
# Import modules
#===============================================================================
import os
import settings as s
#===============================================================================
# DRAW SCREEN
#===============================================================================
def draw_screen(sensors, thingspeak_enable, key, rrd_enable, rrd_set):
os.system('clear')
display_string = []
display_string.append('WEATHER STATION')
display_string.append('')
display_string.append('Next precip. acc. reset at '+ str(s.PRECIP_ACC_RESET_TIME))
#Display thingspeak field data set up
if thingspeak_enable:
display_string.append('')
display_string.append('Thingspeak write api key: '+key)
display_string.append('')
display_string.append('Thingspeak field set up:')
display_string.append(' Field\tName\t\tValue\tUnit')
display_string.append(' ---------------------------------------')
for key, value in sorted(sensors.items(), key=lambda e: e[1][0]):
display_string.append(' ' + str(value[s.TS_FIELD]) + '\t' + key +
'\t' + str(value[s.VALUE]) + '\t' + value[s.UNIT])
#Display RRDtool set up
if rrd_enable:
display_string.append('')
display_string.append('RRDtool set up:')
for i in range(0,len(rrd_set)):
display_string += rrd_set[i]
display_string.append('')
#Create table header
display_string.append('')
header ='Date\t\tTime\t\t'
header_names = ''
for key, value in sorted(sensors.items(), key=lambda e: e[1][0]):
header_names = header_names + key +'\t'
header = header + header_names + 'TS Send'
display_string.append(header)
display_string.append('=' * (len(header) + 5 * header.count('\t')))
#Find the total number of rows on screen
rows, columns = os.popen('stty size', 'r').read().split()
#Draw screen
print('\n'.join(display_string))
#Return number of rows left for data
return(int(rows) - len(display_string))
| mit | Python |
|
754dc2a5bc26a555576970a494a9de0e5026fae1 | Add DTFT demo | jnez71/demos,jnez71/demos | dtft.py | dtft.py | #!/usr/bin/env python3
"""
Using a typical FFT routine and showing the principle
behind the DTFT computation.
"""
import numpy as np
from matplotlib import pyplot
##################################################
# Efficient practical usage
def fft(values, dt):
freqs = np.fft.rfftfreq(len(values), dt)
coeffs = np.sqrt(2/len(values)) * np.fft.rfft(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2) # don't "double count" the DC alias
return (freqs, coeffs)
# Working principle
def dtft(values, dt):
times = dt * np.arange(len(values))
nyquist = 1/(2*dt)
dw = 1/(dt*len(values))
freqs = np.arange(0.0, nyquist+dw, dw)
# (rad/s)/Hz all w*t products
dtft_matrix = np.exp(-1j * (2*np.pi) * np.outer(freqs, times))
coeffs = np.sqrt(2/len(values)) * dtft_matrix.dot(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2) # don't "double count" the DC alias
return (freqs, coeffs)
##################################################
def function(time):
w = 20*np.pi
value = 0.0
for k in range(5):
value += (k+1)*np.cos((k*w)*time)
return value
dt = 0.001
times = np.arange(0.0, 0.2, dt)
values = function(times)
##################################################
fft_freqs, fft_coeffs = fft(values, dt)
dtft_freqs, dtft_coeffs = dtft(values, dt)
assert np.allclose(fft_freqs, dtft_freqs)
assert np.allclose(fft_coeffs, dtft_coeffs)
##################################################
# Demonstrate Parseval's theorem
print(np.linalg.norm(values))
print(np.linalg.norm(dtft_coeffs))
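# With the unitary scaling used above, the two printed norms should match:
# the signal's energy equals its spectrum's energy (Parseval's theorem).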
##################################################
fig = pyplot.figure()
ax = fig.add_subplot(2, 1, 1)
ax.plot(times, values)
ax.set_xlabel("Time (s)", fontsize=16)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.scatter(dtft_freqs, np.abs(dtft_coeffs))
ax.set_xlabel("Freq (Hz)", fontsize=16)
ax.grid(True)
pyplot.show()
| mit | Python |
|
f342a3bb330eab74f31f632c81792f93a6e086e8 | Add a script to automate the generation of source distributions for Windows and Linux. | scottza/PyTOPKAPI,sahg/PyTOPKAPI | create_distributions.py | create_distributions.py | """Script to automate the creation of Windows and Linux source distributions.
The TOPKAPI_example directory is also copied and the .svn directories stripped
to make a clean distribution. The manual is included in MSWord format for now
because this is how it's stored in SVN.
This script currently relies on Linux tools and will only work on a Linux
system for now.
"""
import os
import shutil
command = 'find . -name .svn -type d -print0 | xargs -0 rm -rf'
def make_distro(dist_path, ex_path, files):
path = os.path.join(dist_path, ex_path)
if os.path.isdir(dist_path):
        # Renamed the walk variable from 'files' so it doesn't clobber the
        # 'files' argument, which is still needed for the copy loop below.
        for root, dirs, walk_files in os.walk(dist_path, topdown=False):
            for name in walk_files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
shutil.copytree(ex_path, path)
curr_dir = os.getcwd()
os.chdir(path)
os.system(command)
os.chdir(curr_dir)
for fname in files:
shutil.copy(fname, dist_path)
if __name__ == "__main__":
# make sure the source distributions are built
os.system('python setup.py sdist --formats=gztar,zip')
# make Linux distribution
dist_path = 'TOPKAPI_linux'
ex_path = 'TOPKAPI_Example'
linux_files = ['dist/TOPKAPI-0.1.tar.gz', 'TOPKAPI_Manual.doc']
make_distro(dist_path, ex_path, linux_files)
# make Windows distribution
dist_path = 'TOPKAPI_windows'
ex_path = 'TOPKAPI_Example'
windows_files = ['dist/TOPKAPI-0.1.zip', 'TOPKAPI_Manual.doc']
make_distro(dist_path, ex_path, windows_files)
| bsd-3-clause | Python |
|
6fd4aefcc70e28d96d7110a903328f24b6fea5e4 | bring back the in RAM version, it uses less RAM, but too much to pass 10M entries I think | warnerpr/zorin | zorin/mreport.py | zorin/mreport.py | import sys
import json
class Site(object):
def __init__(self):
self.op_events = {}
self.chats = set()
self.emails = set()
self.operators = set()
self.visitors = set()
def add_operator_event(self, ts, op, state):
self.op_events[op] = sorted(set(self.op_events.get(op, []) + [(ts, state)]))
self.operators.add(op)
def get_state(self, time_stamp):
states = []
for op, events in self.op_events.items():
prev_state = False
for ts, state in events:
if ts > time_stamp:
break
prev_state = state
states.append(prev_state)
return True if True in states else False
def add_chat(self, time_stamp, visitor):
if time_stamp in self.chats or time_stamp in self.emails:
return
state = self.get_state(time_stamp)
if state:
self.chats.add(time_stamp)
else:
self.emails.add(time_stamp)
self.visitors.add(visitor)
def report(self, site_id):
print "{site_id},messages={messages},emails={emails},operators={operators},visitors={visitors}".format(
site_id=site_id, messages=len(self.chats), emails=len(self.emails),
operators=len(self.operators), visitors=len(self.visitors))
def main():
fname = sys.argv[1]
iterations = []
for iter in range(0,15):
sites = {}
iterations.append(sites)
with open(fname) as f:
for line in f.readlines():
data = json.loads(line)
site_id = data['site_id']
site = sites.setdefault(site_id, Site())
if data['type'] == 'status':
status = True if data['data']['status'] == 'online' else False
site.add_operator_event(int(data['timestamp']), intern(str(data['from'])), status)
with open(fname) as f:
for line in f.readlines():
data = json.loads(line.strip())
site_id = data['site_id']
site = sites[site_id]
if data['type'] == 'message':
site.add_chat(int(data['timestamp']), intern(str(data['from'])))
# for site_id, site in sorted(sites.items(), key=lambda _e: _e[0]):
# site.report(site_id)
raw_input("Press Enter to continue...")
print iterations
if __name__ == '__main__':
main()
| mit | Python |
|
a038d9e204bd54e69d5a84427bc9a56b04583460 | Create restart script | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/maintenance/scripts/restart_database.py | dbaas/maintenance/scripts/restart_database.py | from datetime import date, timedelta
from maintenance.models import TaskSchedule
from logical.models import Database
def register_schedule_task_restart_database(hostnames):
today = date.today()
try:
databases = Database.objects.filter(
databaseinfra__instances__hostname__hostname__in=hostnames
).distinct()
for database in databases:
print("Checking database {}".format(database.name))
            scheduled_tasks = TaskSchedule.objects.filter(
status=TaskSchedule.SCHEDULED,
database=database,
method_path='restart_database'
)
            if scheduled_tasks:
print("Already scheduled for database {}!".format(
database.name)
)
else:
task = TaskSchedule.objects.create(
method_path='restart_database',
scheduled_for=TaskSchedule.next_maintenance_window(
today + timedelta(days=2),
database.databaseinfra.maintenance_window,
database.databaseinfra.maintenance_day
),
database=database
)
task.send_mail(is_new=True)
print("Done")
except Exception as err:
print("Error: {}".format(err))
| bsd-3-clause | Python |
|
93a396fdfc2b4a9f83ffbeb38c6f5a574f61478e | Add initial MeSH update script | clulab/bioresources | scripts/update_mesh.py | scripts/update_mesh.py | import os
import re
import csv
import gzip
import xml.etree.ElementTree as ET
from urllib.request import urlretrieve
def _get_term_names(record, name):
# We then need to look for additional terms related to the
# preferred concept to get additional names
concepts = record.findall('ConceptList/Concept')
all_term_names = []
for concept in concepts:
# We only look at the preferred concept here
if concept.attrib['PreferredConceptYN'] == 'Y':
terms = concept.findall('TermList/Term')
for term in terms:
term_name = term.find('String').text
if term_name != name:
all_term_names.append(term_name)
return all_term_names
def get_mesh_names(et):
names = {}
for record in et.iterfind('DescriptorRecord'):
# We first get the ID and the name
uid = record.find('DescriptorUI').text
tree_numbers = record.findall('TreeNumberList/TreeNumber')
# Diseases are in the C subtree
if not any(t.text[0] == 'C' for t in tree_numbers):
continue
name = record.find('DescriptorName/String').text
synonyms = _get_term_names(record, name)
names[uid] = [name] + synonyms
return names
def entries_from_names(names):
entries = []
for uid, synonyms in names.items():
for synonym in synonyms:
entries.append((synonym, uid))
print('Got a total of %d entries' % len(entries))
return entries
def load_mesh_resource_file():
url = 'ftp://nlmpubs.nlm.nih.gov/online/mesh/2019/xmlmesh/desc2019.gz'
desc_path = os.path.join(here, 'mesh_desc2019.gz')
if not os.path.exists(desc_path):
print('Download MeSH descriptors from %s' % url)
urlretrieve(url, desc_path)
print('Done downloading MeSH descriptors')
# Process the XML and find descriptor records
with gzip.open(desc_path) as desc_file:
print('Parsing MeSH descriptors')
et = ET.parse(desc_file)
return et
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'mesh_disease.tsv')
et = load_mesh_resource_file()
mesh_names = get_mesh_names(et)
# We sort the entries first by the synonym but in a way that special
# characters and capitalization are ignored, then sort by ID
entries = entries_from_names(mesh_names)
entries = sorted(entries, key=(lambda x:
(re.sub('[^A-Za-z0-9]', '', x[0]).lower(),
x[1])))
# Now dump the entries into an updated TSV file
with open(resource_fname, 'w') as fh:
writer = csv.writer(fh, delimiter='\t')
for entry in entries:
writer.writerow(entry)
with open(resource_fname, 'rb') as f1, \
gzip.open(resource_fname + '.gz', 'wb') as f2:
f2.writelines(f1)
| apache-2.0 | Python |
|
6ea2d5af752e4765be8ef433139f72538fa3a2dd | Check that relationships in SsWang are up-to-date | tanghaibao/goatools,tanghaibao/goatools | tests/test_semsim_wang_termwise.py | tests/test_semsim_wang_termwise.py | #!/usr/bin/env python3
"""Test S-value for Table 1 in Wang_2007"""
__copyright__ = "Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
__author__ = "DV Klopfenstein"
from os.path import join
from sys import stdout
from goatools.base import get_godag
from goatools.semsim.termwise.wang import SsWang
from goatools.godag.consts import RELATIONSHIP_SET
from tests.utils import REPO
from tests.data.ssWang.tbl1 import GO2SVALUE
def test_semsim_wang(prt=stdout):
"""Wang Semantic Similarity tests"""
fin_godag = join(REPO, 'go-basic.obo')
run = Run(fin_godag, prt)
run.chk_relationships()
class Run:
"""Wang Semantic Similarity tests"""
def __init__(self, fin_godag, prt):
self.godag = get_godag(fin_godag, optional_attrs=['relationship'], prt=prt)
@staticmethod
def _chk_svalues_a(dag_a):
"""Check values against Table 1"""
assert len(dag_a.go2svalue) == len(GO2SVALUE)
for goid, svalue_act in dag_a.go2svalue.items():
svalue_exp = GO2SVALUE[goid]
assert abs(svalue_exp - svalue_act) < .001, 'MISMATCH EXP({}) != ACT({})'.format(
svalue_exp, svalue_act)
def chk_relationships(self):
"""Check that actual relationships are expected"""
rels_all = set()
for goterm in self.godag.values():
rels_cur = goterm.relationship.keys()
if rels_cur:
rels_all.update(rels_cur)
assert rels_all == RELATIONSHIP_SET, 'UNEXPECTED RELATIONSHIPS'
print('**PASSED: EXPECTED GODag RELATIONSHIPS: {R}'.format(R=sorted(rels_all)))
rels_all.add('is_a')
rels_act = set(SsWang.dflt_rel2scf.keys())
assert rels_all == rels_act, 'BAD SsWang RELATIONSHIPS: {Rs}'.format(Rs=rels_act)
print('**PASSED: EXPECTED SsWang RELATIONSHIPS: {R}'.format(R=sorted(rels_act)))
if __name__ == '__main__':
test_semsim_wang()
# Copyright (C) 2020-present DV Klopfenstein. All rights reserved.
| bsd-2-clause | Python |
|
43eb87c1297ac9999f027f275bce94b3e8f4894e | add problem | caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice | leetcode/14_longest_common_prefix.py | leetcode/14_longest_common_prefix.py | """
Write a function to find the longest common prefix string amongst an array of strings.
If there is no common prefix, return an empty string "".
Example 1:
Input: ["flower","flow","flight"]
Output: "fl"
Example 2:
Input: ["dog","racecar","car"]
Output: ""
Explanation: There is no common prefix among the input strings.
Note:
All given inputs are in lowercase letters a-z.
"""
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
| mit | Python |
|
67c991d5337d92602745fa5fc0a742c0a761e0e9 | Sort Colors | ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground | 75_Sort_Colors.py | 75_Sort_Colors.py | class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
i = 0
j, k = i, len(nums) - 1
while j <= k:
if nums[j] == 0:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] == 1:
j += 1
else:
nums[j], nums[k] = nums[k], nums[j]
k -= 1
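# Illustrative check:
#   nums = [2, 0, 2, 1, 1, 0]; Solution().sortColors(nums)  # nums -> [0, 0, 1, 1, 2, 2]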
| mit | Python |
|
347f22593a20c5553b9469fad051dbaa34643082 | add test_log_likelihood.py | mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,probcomp/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat | crosscat/tests/test_log_likelihood.py | crosscat/tests/test_log_likelihood.py | import argparse
from functools import partial
#
import pylab
pylab.ion()
pylab.show()
#
from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as du
import crosscat.utils.timing_test_utils as ttu
import crosscat.utils.convergence_test_utils as ctu
parser = argparse.ArgumentParser()
parser.add_argument('--gen_seed', default=0, type=int)
parser.add_argument('--num_rows', default=100, type=int)
parser.add_argument('--num_cols', default=4, type=int)
parser.add_argument('--num_clusters', default=5, type=int)
parser.add_argument('--num_views', default=1, type=int)
parser.add_argument('--n_steps', default=10, type=int)
args = parser.parse_args()
#
gen_seed = args.gen_seed
num_rows = args.num_rows
num_cols = args.num_cols
num_clusters = args.num_clusters
num_views = args.num_views
n_steps = args.n_steps
#
n_test = num_rows / 10
# generate data
T, M_c, M_r, gen_X_L, gen_X_D = ttu.generate_clean_state(gen_seed, num_clusters,
num_cols, num_rows, num_views)
T_test = ctu.create_test_set(M_c, T, gen_X_L, gen_X_D, n_test, seed_seed=0)
engine = LocalEngine()
X_L, X_D = engine.initialize(M_c, M_r, T)
gen_mtll = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T_test)
gen_preplexity = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T)
# run inference
calc_perplexity = lambda p_State: \
ctu.calc_mean_test_log_likelihood(M_c, T, p_State.get_X_L(),
p_State.get_X_D(), T)
calc_test_log_likelihood = lambda p_State: \
ctu.calc_mean_test_log_likelihood(M_c, T, p_State.get_X_L(),
p_State.get_X_D(), T_test)
diagnostic_func_dict = dict(
perplexity=calc_perplexity,
test_log_likelihood=calc_test_log_likelihood,
)
X_L, X_D, diagnostics_dict = engine.analyze(M_c, T, X_L, X_D,
do_diagnostics=diagnostic_func_dict, n_steps=n_steps)
# plot
pylab.plot(diagnostics_dict['test_log_likelihood'], 'g')
pylab.plot(diagnostics_dict['perplexity'], 'r')
pylab.axhline(gen_mtll, color='k')
pylab.axhline(gen_preplexity, color='b')
| apache-2.0 | Python |
|
c48d852c2ceb39e6692be1b2c270aa75156e5b5e | Add migrations/0121_….py | lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public | ielex/lexicon/migrations/0121_copy_hindi_transliteration_to_urdu.py | ielex/lexicon/migrations/0121_copy_hindi_transliteration_to_urdu.py | # -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
Meaning = apps.get_model("lexicon", "Meaning")
Lexeme = apps.get_model("lexicon", "Lexeme")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
for meaning in Meaning.objects.all():
hLexemes = Lexeme.objects.filter(language=hindi, meaning=meaning).all()
uLexemes = Lexeme.objects.filter(language=urdu, meaning=meaning).all()
if len(hLexemes) != 1 or len(uLexemes) != 1:
continue
hLex = hLexemes[0]
uLex = uLexemes[0]
if uLex.transliteration == '' and hLex.transliteration != '':
uLex.transliteration = hLex.transliteration
uLex.save()
def reverse_func(apps, schema_editor):
print('Reverse of 0121_copy_hindi_transliteration_to_urdu does nothing.')
class Migration(migrations.Migration):
dependencies = [('lexicon', '306_0127_fix_issue_223')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| bsd-2-clause | Python |
|
da5bd8b1afcffd8a0509a785183ce1474fe7f53c | Create insult.py | devzero-xyz/Andromeda-Plugins | insult.py | insult.py | """By Bowserinator: Insults people :D"""
from utils import add_cmd, add_handler
import utils
import random
name = "insult"
cmds = ["insult"]
insultPattern = [
"That [REPLACE] just cut me off!",
"My boss is a major [REPLACE]!",
"Don't tell her I said this, but that dude she's with is a real [REPLACE]!",
"Quit being such a [REPLACE]!",
"The only people who would vote for that guy are total [REPLACE]s!",
"What are you, some kind of [REPLACE]?",
"Dude's a real [REPLACE], you know what I mean?",
"He's got an ego like a [REPLACE]!",
"She was being a real [REPLACE] at the store today!",
"That [REPLACE] developer's code refuses to compile!",
"Her kids are total [REPLACE]s!",
"Whoever wrote this API documentation is a complete [REPLACE]!",
"That guy has the personality of a [REPLACE]!",
"I'm pretty sure I was a total [REPLACE] at the bar last night.",
"What kind of [REPLACE] buys pre-ground coffee?",
"I'd rather get a [REPLACE] to the eye than sit through this lecture.",
"Wow, that [REPLACE] just went off the deep end.",
"I may be a jerk, but at least I'm not like that [REPLACE] over there.",
"I need that like I need a [REPLACE] on my elbow.",
"What kind of [REPLACE] slows down to merge on the highway?",
"You've got a face like a [REPLACE].",
"Nothing personal, but you're a real [REPLACE].",
"What a bunch of [REPLACE]s.",
"That [REPLACE] is legally dead in 27 states - plus Guam.",
]
badwords = [
'Ass',
'Bitch',
'Butt',
'Cock',
'Cum',
'Cunt',
'Dick',
'Douche',
'Fart',
'Fuck',
'Jizz',
'Schlong',
'Shit',
'Slut',
'Snatch',
'Tit',
'Twat',
'Wang',
'Wank',
'Whore',
]
@add_cmd
def insult(irc, event, args):
    send = "\x02" + args[0] + ", \x0f" + random.choice(insultPattern).replace("[REPLACE]", random.choice(badwords).lower())
    irc.reply(event, send)
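# Hypothetical exchange (actual output is random):
#   <alice> insult bob
#   <bot>   bob, Quit being such a twat!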
add_handler(insult, name)
| mit | Python |
|
1a1bf760f9d912f6c19943b58198d947b4e65b84 | Add mraa GPIO test | mythi/intel-iot-refkit,jairglez/intel-iot-refkit,mythi/intel-iot-refkit,ipuustin/intel-iot-refkit,YinThong/intel-iot-refkit,jairglez/intel-iot-refkit,jairglez/intel-iot-refkit,klihub/intel-iot-refkit,YinThong/intel-iot-refkit,klihub/intel-iot-refkit,ipuustin/intel-iot-refkit,klihub/intel-iot-refkit,mythi/intel-iot-refkit,mythi/intel-iot-refkit,klihub/intel-iot-refkit,mythi/intel-iot-refkit,intel/intel-iot-refkit,jairglez/intel-iot-refkit,jairglez/intel-iot-refkit,ipuustin/intel-iot-refkit,ipuustin/intel-iot-refkit,mythi/intel-iot-refkit,intel/intel-iot-refkit,jairglez/intel-iot-refkit,ipuustin/intel-iot-refkit,intel/intel-iot-refkit,YinThong/intel-iot-refkit,intel/intel-iot-refkit,ipuustin/intel-iot-refkit,YinThong/intel-iot-refkit,intel/intel-iot-refkit,YinThong/intel-iot-refkit,intel/intel-iot-refkit,YinThong/intel-iot-refkit,klihub/intel-iot-refkit,klihub/intel-iot-refkit,YinThong/intel-iot-refkit,klihub/intel-iot-refkit,intel/intel-iot-refkit,jairglez/intel-iot-refkit,mythi/intel-iot-refkit,ipuustin/intel-iot-refkit | meta-iotqa/lib/oeqa/runtime/sanity/mraa_gpio.py | meta-iotqa/lib/oeqa/runtime/sanity/mraa_gpio.py | from oeqa.oetest import oeRuntimeTest
import unittest
import subprocess
from time import sleep
class MraaGpioTest(oeRuntimeTest):
'''
    These tests require a BeagleBone as the testing host.
'''
pin = ""
def setUp(self):
        (status, output) = self.target.run("mraa-gpio version")
output = output.lower()
if any(x in output for x in ("broxton", "tuchuck", "joule")):
self.pin = "51"
elif "minnowboard" in output:
self.pin = "25"
else:
raise unittest.SkipTest(output)
def test_gpio(self):
'''
        Toggle a GPIO pin on and off and verify the pin output with
        the BeagleBone.
'''
def check_gpio_output():
cmd = "cat /sys/class/gpio/gpio20/value".split()
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return int(output)
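        # gpio20 on the BeagleBone is assumed to be wired to the DUT pin
        # selected in setUp(); adjust the sysfs path if your rig differs.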
self.target.run("mraa-gpio set " + self.pin + " 0")
sleep(1)
output = check_gpio_output()
self.assertEqual(output, 0, msg="GPIO pin output is not 0")
self.target.run("mraa-gpio set " + self.pin + " 1")
sleep(1)
output = check_gpio_output()
self.assertEqual(output, 1, msg="GPIO pin output is not 1")
self.target.run("mraa-gpio set " + self.pin + " 0")
sleep(1)
output = check_gpio_output()
self.assertEqual(output, 0, msg="GPIO pin output is not 0")
| mit | Python |
|
8b7e84e98ccf0b44d7c6cc6ff23f462ec648d3f0 | add test | Eigenstate/msmbuilder,peastman/msmbuilder,msmbuilder/msmbuilder,mpharrigan/mixtape,Eigenstate/msmbuilder,Eigenstate/msmbuilder,mpharrigan/mixtape,dr-nate/msmbuilder,brookehus/msmbuilder,rafwiewiora/msmbuilder,rafwiewiora/msmbuilder,cxhernandez/msmbuilder,rafwiewiora/msmbuilder,dr-nate/msmbuilder,msultan/msmbuilder,cxhernandez/msmbuilder,mpharrigan/mixtape,msmbuilder/msmbuilder,msultan/msmbuilder,brookehus/msmbuilder,mpharrigan/mixtape,dr-nate/msmbuilder,Eigenstate/msmbuilder,msultan/msmbuilder,brookehus/msmbuilder,cxhernandez/msmbuilder,dr-nate/msmbuilder,msultan/msmbuilder,peastman/msmbuilder,cxhernandez/msmbuilder,brookehus/msmbuilder,peastman/msmbuilder,rafwiewiora/msmbuilder,msmbuilder/msmbuilder,cxhernandez/msmbuilder,rafwiewiora/msmbuilder,peastman/msmbuilder,Eigenstate/msmbuilder,peastman/msmbuilder,msmbuilder/msmbuilder,dr-nate/msmbuilder,msmbuilder/msmbuilder,brookehus/msmbuilder,msultan/msmbuilder,mpharrigan/mixtape | msmbuilder/tests/test_feature_selection.py | msmbuilder/tests/test_feature_selection.py | import numpy as np
from sklearn.feature_selection import VarianceThreshold as VarianceThresholdR
from ..featurizer import DihedralFeaturizer
from ..feature_selection import FeatureSelector, VarianceThreshold
from ..example_datasets import fetch_alanine_dipeptide as fetch_data
FEATS = [
('phi', DihedralFeaturizer(types=['phi'], sincos=True)),
('psi', DihedralFeaturizer(types=['psi'], sincos=True)),
]
def test_featureselector():
dataset = fetch_data()
trajectories = dataset["trajectories"]
fs = FeatureSelector(FEATS, which_feat='phi')
assert fs.which_feat == ['phi']
y1 = fs.partial_transform(trajectories[0])
y_ref1 = FEATS[0][1].partial_transform(trajectories[0])
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_featureselector_transform():
dataset = fetch_data()
trajectories = dataset["trajectories"]
fs = FeatureSelector(FEATS, which_feat='psi')
y1 = fs.transform(trajectories)
assert len(y1) == len(trajectories)
def test_variancethreshold_vs_sklearn():
dataset = fetch_data()
trajectories = dataset["trajectories"]
fs = FeatureSelector(FEATS)
vt = VarianceThreshold(0.1)
vtr = VarianceThresholdR(0.1)
y = fs.partial_transform(trajectories[0])
z1 = vt.fit_transform([y])[0]
z_ref1 = vtr.fit_transform(y)
np.testing.assert_array_almost_equal(z_ref1, z1)
| import numpy as np
from sklearn.feature_selection import VarianceThreshold as VarianceThresholdR
from ..featurizer import DihedralFeaturizer
from ..feature_selection import FeatureSelector, VarianceThreshold
from ..example_datasets import fetch_alanine_dipeptide as fetch_data
FEATS = [
('phi', DihedralFeaturizer(types=['phi'], sincos=True)),
('psi', DihedralFeaturizer(types=['psi'], sincos=True)),
]
def test_featureselector():
dataset = fetch_data()
trajectories = dataset["trajectories"]
fs = FeatureSelector(FEATS, which_feat='phi')
assert fs.which_feat == ['phi']
y1 = fs.partial_transform(trajectories[0])
y_ref1 = FEATS[0][1].partial_transform(trajectories[0])
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_variancethreshold_vs_sklearn():
dataset = fetch_data()
trajectories = dataset["trajectories"]
fs = FeatureSelector(FEATS)
vt = VarianceThreshold(0.1)
vtr = VarianceThresholdR(0.1)
y = fs.partial_transform(trajectories[0])
z1 = vt.fit_transform([y])[0]
z_ref1 = vtr.fit_transform(y)
np.testing.assert_array_almost_equal(z_ref1, z1)
| lgpl-2.1 | Python |
e2b74a9978de4a6f15273e3e098379107eb0bec3 | Create 0001_0.py | Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2 | pylyria/0001/0001_0.py | pylyria/0001/0001_0.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# Problem 0001: As an independent App Store developer, you want to run a
# limited-time promotion. How do you use Python to generate 200 activation
# codes (or coupons) for your app?
import random
import string
def activation_code(id, length=16):
    prefix = hex(int(id))[2:] + 'V'
    length = length - len(prefix)
    chars = string.ascii_uppercase + string.digits
    return prefix + ''.join([random.choice(chars) for i in range(length)])
def get_id(code):
return str(int(code.upper(), 16))
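# Round-trip sketch (the suffix is random, so codes will differ):
#   code = activation_code(42)      # e.g. '2aV9K3QX7D2M1ZP0'
#   get_id(code.split('V')[0])      # -> '42'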
if __name__ == '__main__':
for i in range(10, 500, 23):
code = activation_code(i)
        id_hex = code.split('V')[0]  # 'V' terminates the hex id prefix
        id = get_id(id_hex)
        print code, id
| mit | Python |
|
82f9edd572d440941e7de67398b3fdeb52d5c389 | Add new migration | openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform | modelview/migrations/0047_auto_20191021_1525.py | modelview/migrations/0047_auto_20191021_1525.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-21 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelview', '0046_auto_20191007_1630'),
]
operations = [
migrations.AlterField(
model_name='basicfactsheet',
name='license',
field=models.CharField(choices=[('Academic Free License v3.0', 'Academic Free License v3.0'), ('Apache license 2.0', 'Apache license 2.0'), ('Artistic license 2.0', 'Artistic license 2.0'), ('Boost Software License 1.0', 'Boost Software License 1.0'), ('BSD 2-clause "Simplified" license', 'BSD 2-clause "Simplified" license'), ('BSD 3-clause Clear license', 'BSD 3-clause Clear license'), ('Creative Commons license family', 'Creative Commons license family'), ('Creative Commons Zero v1.0 Universal', 'Creative Commons Zero v1.0 Universal'), ('Creative Commons Attribution 4.0', 'Creative Commons Attribution 4.0'), ('Creative Commons Attribution Share Alike 4.0', 'Creative Commons Attribution Share Alike 4.0'), ('Do What The F*ck You Want To Public License', 'Do What The F*ck You Want To Public License'), ('Educational Community License v2.0', 'Educational Community License v2.0'), ('Eclipse Public License 1.0', 'Eclipse Public License 1.0'), ('European Union Public License 1.1', 'European Union Public License 1.1'), ('GNU Affero General Public License v3.0', 'GNU Affero General Public License v3.0'), ('GNU General Public License family', 'GNU General Public License family'), ('GNU General Public License v2.0', 'GNU General Public License v2.0'), ('GNU General Public License v3.0', 'GNU General Public License v3.0'), ('GNU Lesser General Public License family', 'GNU Lesser General Public License family'), ('GNU Lesser General Public License v2.1', 'GNU Lesser General Public License v2.1'), ('GNU Lesser General Public License v3.0', 'GNU Lesser General Public License v3.0'), ('ISC', 'ISC'), ('LaTeX Project Public License v1.3c', 'LaTeX Project Public License v1.3c'), ('Microsoft Public License', 'Microsoft Public License'), ('MIT', 'MIT'), ('Mozilla Public License 2.0', 'Mozilla Public License 2.0'), ('Open Software License 3.0', 'Open Software License 3.0'), ('PostgreSQL License', 'PostgreSQL License'), ('SIL Open Font License 1.1', 'SIL Open Font License 1.1'), ('University of Illinois/NCSA Open Source License', 'University of Illinois/NCSA Open Source License'), ('The Unlicense', 'The Unlicense'), ('zLib License', 'zLib License'), ("BSD 3-clause 'New' or 'Revised' license", "BSD 3-clause 'New' or 'Revised' license"), ('Other', 'Other'), ('Unknown', 'Unknown')], default='Unknown', max_length=20, verbose_name='License'),
),
migrations.AlterField(
model_name='energyframework',
name='ci_FuelHandling',
field=models.BooleanField(default=False, verbose_name='Fuel handling'),
),
migrations.AlterField(
model_name='energyframework',
name='gs_single_project',
field=models.BooleanField(default=False, verbose_name='Single-project'),
),
migrations.AlterField(
model_name='energyframework',
name='inital_release_date',
field=models.DateField(help_text='When [mm-yyyy] was the framework initially released?', max_length=30, null=True, verbose_name='Inital Release Date'),
),
migrations.AlterField(
model_name='energyframework',
name='last_updated',
field=models.DateField(help_text='When was the factsheet last updated?', max_length=200, null=True, verbose_name='Last updated'),
),
]
| agpl-3.0 | Python |
|
45db21e2b4093cbda7976189327467ca3aebe1a3 | add instance serializer | CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend | api/v2/serializers/instance_serializer.py | api/v2/serializers/instance_serializer.py | from core.models import Instance
from rest_framework import serializers
from .identity_summary_serializer import IdentitySummarySerializer
from .user_serializer import UserSerializer
class InstanceSerializer(serializers.ModelSerializer):
identity = IdentitySummarySerializer(source='created_by_identity')
user = UserSerializer(source='created_by')
class Meta:
model = Instance
fields = ('id', 'name', 'ip_address', 'shell', 'vnc', 'start_date', 'end_date', 'identity', 'user')
| apache-2.0 | Python |
|
3fc5c2a4d3f13dc8062c93dd86fd94f06c35c91d | add an easy echo server by using python | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | network/echo-server/echo-iterative/main.py | network/echo-server/echo-iterative/main.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
def handle(client_socket, client_address):
while True:
data = client_socket.recv(4096)
if data:
            client_socket.sendall(data)  # sendall() handles short writes
else:
print 'disconnect', client_address
client_socket.close()
break
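# Quick manual check (assuming the server is running on this host):
#   $ nc 127.0.0.1 5555
#   hello        <- typed
#   hello        <- echoed back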
def main():
listen_address = ('0.0.0.0', 5555)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(listen_address)
server_socket.listen(5)
while True:
(client_socket, client_address) = server_socket.accept()
print 'got connection from', client_address
handle(client_socket, client_address)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
48d774b8bdcaa924303b905cef27b4eb13f08fd6 | Add pillar_roots to the wheel system | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/wheel/pillar_roots.py | salt/wheel/pillar_roots.py | '''
The `pillar_roots` wheel module is used to manage files under the pillar roots
directories on the master server.
'''
# Import python libs
import os
# Import salt libs
import salt.utils
def find(path, env='base'):
'''
    Return a list of the files located with the given path and environment
'''
# Return a list of paths + text or bin
ret = []
if env not in __opts__['pillar_roots']:
return ret
for root in __opts__['pillar_roots'][env]:
full = os.path.join(root, path)
if os.path.isfile(full):
# Add it to the dict
with open(full, 'rb') as fp_:
if salt.utils.istextfile(fp_):
ret.append({full: 'txt'})
else:
ret.append({full: 'bin'})
return ret
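# Example shape of find()'s return value (paths are illustrative):
#   [{'/srv/pillar/top.sls': 'txt'}]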
def list_env(env='base'):
'''
Return all of the file paths found in an environment
'''
ret = {}
if not env in __opts__['pillar_roots']:
return ret
for f_root in __opts__['pillar_roots'][env]:
ret[f_root] = {}
for root, dirs, files in os.walk(f_root):
sub = ret[f_root]
if root != f_root:
# grab subroot ref
sroot = root
above = []
# Populate the above dict
while not os.path.samefile(sroot, f_root):
base = os.path.basename(sroot)
if base:
above.insert(0, base)
sroot = os.path.dirname(sroot)
for aroot in above:
sub = sub[aroot]
for dir_ in dirs:
sub[dir_] = {}
for fn_ in files:
sub[fn_] = 'f'
return ret
def list_roots():
'''
    Return all of the file names in all available environments
'''
ret = {}
for env in __opts__['pillar_roots']:
ret[env] = []
ret[env].append(list_env(env))
return ret
def read(path, env='base'):
'''
    Read the contents of a text file; binary files are skipped.
'''
# Return a dict of paths + content
ret = []
files = find(path, env)
for fn_ in files:
full = fn_.keys()[0]
form = fn_[full]
if form == 'txt':
with open(full, 'rb') as fp_:
ret.append({full: fp_.read()})
return ret
def write(data, path, env='base', index=0):
'''
Write the named file, by default the first file found is written, but the
index of the file can be specified to write to a lower priority file root
'''
if not env in __opts__['pillar_roots']:
return 'Named environment {0} is not present'.format(env)
if not len(__opts__['pillar_roots'][env]) > index:
return 'Specified index {0} in environment {1} is not present'.format(
index, env)
if os.path.isabs(path):
return ('The path passed in {0} is not relative to the environment '
'{1}').format(path, env)
dest = os.path.join(__opts__['pillar_roots'][env][index], path)
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest, 'w+') as fp_:
fp_.write(data)
return 'Wrote data to file {0}'.format(dest)
| apache-2.0 | Python |
|
07fd61306e645b7240883d5d468f94be5ce8a34c | Add a command to retrieve all triggers | HubbeKing/Hubbot_Twisted | Commands/Triggers.py | Commands/Triggers.py | from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
import GlobalVars
class Command(CommandInterface):
triggers = ["triggers"]
help = "triggers -- returns a list of all command triggers, must be over PM"
def execute(self, Hubbot, message):
if message.User.Name != message.ReplyTo:
return IRCResponse(ResponseType.Say, "{} must be used over PM!".format(message.Command), message.ReplyTo)
else:
response = ""
for name, command in GlobalVars.commands.iteritems():
                if len(command.triggers) > 0:
for trigger in command.triggers:
if "<" not in trigger and trigger not in response:
response += "{}, ".format(trigger)
            return IRCResponse(ResponseType.Say, response.rstrip(", "), message.ReplyTo) | mit | Python |
|
b173aa1a6dc1c361d65150c6782db7618a5ff126 | Add simple indexing test. | njase/numpy,rajathkumarmp/numpy,ekalosak/numpy,stefanv/numpy,skymanaditya1/numpy,andsor/numpy,pyparallel/numpy,mingwpy/numpy,AustereCuriosity/numpy,bertrand-l/numpy,ViralLeadership/numpy,dch312/numpy,nguyentu1602/numpy,pizzathief/numpy,jakirkham/numpy,mwiebe/numpy,mingwpy/numpy,naritta/numpy,hainm/numpy,brandon-rhodes/numpy,BMJHayward/numpy,MSeifert04/numpy,shoyer/numpy,kiwifb/numpy,bringingheavendown/numpy,dimasad/numpy,embray/numpy,utke1/numpy,joferkington/numpy,ssanderson/numpy,pdebuyl/numpy,leifdenby/numpy,ewmoore/numpy,dato-code/numpy,tdsmith/numpy,sinhrks/numpy,Yusa95/numpy,jorisvandenbossche/numpy,pbrod/numpy,rajathkumarmp/numpy,pyparallel/numpy,kiwifb/numpy,bmorris3/numpy,maniteja123/numpy,ChristopherHogan/numpy,NextThought/pypy-numpy,MSeifert04/numpy,Yusa95/numpy,rherault-insa/numpy,seberg/numpy,bmorris3/numpy,CMartelLML/numpy,numpy/numpy-refactor,mattip/numpy,sonnyhu/numpy,BabeNovelty/numpy,nguyentu1602/numpy,cjermain/numpy,cjermain/numpy,KaelChen/numpy,grlee77/numpy,mortada/numpy,naritta/numpy,pdebuyl/numpy,Anwesh43/numpy,ContinuumIO/numpy,rhythmsosad/numpy,madphysicist/numpy,felipebetancur/numpy,Srisai85/numpy,mathdd/numpy,argriffing/numpy,jorisvandenbossche/numpy,rhythmsosad/numpy,abalkin/numpy,andsor/numpy,rgommers/numpy,madphysicist/numpy,Eric89GXL/numpy,stefanv/numpy,groutr/numpy,GrimDerp/numpy,tynn/numpy,bertrand-l/numpy,NextThought/pypy-numpy,dimasad/numpy,ddasilva/numpy,utke1/numpy,rgommers/numpy,hainm/numpy,sonnyhu/numpy,ContinuumIO/numpy,rhythmsosad/numpy,pbrod/numpy,matthew-brett/numpy,dimasad/numpy,trankmichael/numpy,shoyer/numpy,Dapid/numpy,tacaswell/numpy,pbrod/numpy,rmcgibbo/numpy,MaPePeR/numpy,jakirkham/numpy,dch312/numpy,jonathanunderwood/numpy,GaZ3ll3/numpy,rajathkumarmp/numpy,argriffing/numpy,pbrod/numpy,rherault-insa/numpy,dwf/numpy,musically-ut/numpy,anntzer/numpy,tynn/numpy,dimasad/numpy,SunghanKim/numpy,immerrr/numpy,sinhrks/numpy,sigma-random/numpy,Yusa95/numpy,stuarteberg/numpy,chiffa/numpy,pelson/numpy,ewmoore/numpy,rmcgibbo/numpy,mingwpy/numpy,skwbc/numpy,matthew-brett/numpy,ogrisel/numpy,mattip/numpy,drasmuss/numpy,shoyer/numpy,jakirkham/numpy,rudimeier/numpy,nguyentu1602/numpy,mhvk/numpy,simongibbons/numpy,sigma-random/numpy,SunghanKim/numpy,tacaswell/numpy,skwbc/numpy,shoyer/numpy,WarrenWeckesser/numpy,dwillmer/numpy,rudimeier/numpy,mortada/numpy,rudimeier/numpy,jschueller/numpy,ajdawson/numpy,matthew-brett/numpy,maniteja123/numpy,sonnyhu/numpy,mortada/numpy,skymanaditya1/numpy,dato-code/numpy,trankmichael/numpy,ekalosak/numpy,utke1/numpy,rmcgibbo/numpy,CMartelLML/numpy,numpy/numpy,grlee77/numpy,sigma-random/numpy,ahaldane/numpy,jorisvandenbossche/numpy,MichaelAquilina/numpy,hainm/numpy,dwf/numpy,BMJHayward/numpy,gmcastil/numpy,sinhrks/numpy,skymanaditya1/numpy,stuarteberg/numpy,MaPePeR/numpy,endolith/numpy,ddasilva/numpy,cowlicks/numpy,pelson/numpy,matthew-brett/numpy,cowlicks/numpy,astrofrog/numpy,ssanderson/numpy,MichaelAquilina/numpy,ewmoore/numpy,Dapid/numpy,pizzathief/numpy,mingwpy/numpy,dwillmer/numpy,shoyer/numpy,tacaswell/numpy,brandon-rhodes/numpy,Eric89GXL/numpy,ogrisel/numpy,seberg/numpy,mattip/numpy,pelson/numpy,mhvk/numpy,behzadnouri/numpy,pelson/numpy,behzadnouri/numpy,grlee77/numpy,andsor/numpy,has2k1/numpy,solarjoe/numpy,mhvk/numpy,jorisvandenbossche/numpy,sigma-random/numpy,sonnyhu/numpy,ESSS/numpy,cjermain/numpy,leifdenby/numpy,MSeifert04/numpy,joferkington/numpy,pelson/numpy,joferkington/numpy,madphysicist/numpy,tynn/numpy,anntzer/numpy,stefanv/numpy,jschueller
/numpy,has2k1/numpy,cjermain/numpy,ahaldane/numpy,larsmans/numpy,Linkid/numpy,has2k1/numpy,embray/numpy,pizzathief/numpy,bertrand-l/numpy,numpy/numpy-refactor,njase/numpy,stefanv/numpy,KaelChen/numpy,stuarteberg/numpy,MichaelAquilina/numpy,nbeaver/numpy,mindw/numpy,mathdd/numpy,jankoslavic/numpy,SiccarPoint/numpy,jankoslavic/numpy,kiwifb/numpy,simongibbons/numpy,yiakwy/numpy,Eric89GXL/numpy,larsmans/numpy,mwiebe/numpy,kirillzhuravlev/numpy,ssanderson/numpy,b-carter/numpy,AustereCuriosity/numpy,ChristopherHogan/numpy,brandon-rhodes/numpy,b-carter/numpy,NextThought/pypy-numpy,bringingheavendown/numpy,dato-code/numpy,embray/numpy,solarjoe/numpy,ajdawson/numpy,njase/numpy,moreati/numpy,MaPePeR/numpy,brandon-rhodes/numpy,dch312/numpy,CMartelLML/numpy,mathdd/numpy,SiccarPoint/numpy,seberg/numpy,ewmoore/numpy,nbeaver/numpy,empeeu/numpy,SunghanKim/numpy,numpy/numpy,trankmichael/numpy,jonathanunderwood/numpy,mwiebe/numpy,groutr/numpy,ESSS/numpy,argriffing/numpy,endolith/numpy,ewmoore/numpy,ogrisel/numpy,charris/numpy,rajathkumarmp/numpy,Anwesh43/numpy,ajdawson/numpy,kirillzhuravlev/numpy,endolith/numpy,yiakwy/numpy,Linkid/numpy,immerrr/numpy,chatcannon/numpy,gmcastil/numpy,GaZ3ll3/numpy,mindw/numpy,Anwesh43/numpy,empeeu/numpy,rgommers/numpy,skymanaditya1/numpy,jschueller/numpy,WillieMaddox/numpy,empeeu/numpy,ogrisel/numpy,musically-ut/numpy,gfyoung/numpy,abalkin/numpy,gfyoung/numpy,cowlicks/numpy,dwf/numpy,has2k1/numpy,leifdenby/numpy,NextThought/pypy-numpy,madphysicist/numpy,numpy/numpy,ChanderG/numpy,mattip/numpy,astrofrog/numpy,rgommers/numpy,Anwesh43/numpy,ddasilva/numpy,Yusa95/numpy,sinhrks/numpy,pdebuyl/numpy,BabeNovelty/numpy,hainm/numpy,mindw/numpy,KaelChen/numpy,jankoslavic/numpy,bringingheavendown/numpy,ajdawson/numpy,anntzer/numpy,mhvk/numpy,MSeifert04/numpy,cowlicks/numpy,moreati/numpy,dwf/numpy,ekalosak/numpy,CMartelLML/numpy,githubmlai/numpy,mortada/numpy,grlee77/numpy,WarrenWeckesser/numpy,embray/numpy,ESSS/numpy,felipebetancur/numpy,BMJHayward/numpy,bmorris3/numpy,ContinuumIO/numpy,kirillzhuravlev/numpy,pbrod/numpy,GrimDerp/numpy,BabeNovelty/numpy,SiccarPoint/numpy,seberg/numpy,ViralLeadership/numpy,jakirkham/numpy,astrofrog/numpy,charris/numpy,charris/numpy,GrimDerp/numpy,MaPePeR/numpy,pdebuyl/numpy,joferkington/numpy,Srisai85/numpy,stefanv/numpy,musically-ut/numpy,jakirkham/numpy,charris/numpy,immerrr/numpy,chatcannon/numpy,GaZ3ll3/numpy,jschueller/numpy,SunghanKim/numpy,WarrenWeckesser/numpy,nguyentu1602/numpy,numpy/numpy-refactor,maniteja123/numpy,astrofrog/numpy,GaZ3ll3/numpy,jankoslavic/numpy,Eric89GXL/numpy,trankmichael/numpy,larsmans/numpy,WillieMaddox/numpy,Srisai85/numpy,stuarteberg/numpy,abalkin/numpy,ahaldane/numpy,rudimeier/numpy,WarrenWeckesser/numpy,numpy/numpy,tdsmith/numpy,ChristopherHogan/numpy,githubmlai/numpy,kirillzhuravlev/numpy,ChanderG/numpy,chiffa/numpy,BabeNovelty/numpy,githubmlai/numpy,chatcannon/numpy,nbeaver/numpy,dwillmer/numpy,madphysicist/numpy,embray/numpy,yiakwy/numpy,ChanderG/numpy,drasmuss/numpy,mindw/numpy,simongibbons/numpy,ahaldane/numpy,githubmlai/numpy,groutr/numpy,chiffa/numpy,yiakwy/numpy,WarrenWeckesser/numpy,matthew-brett/numpy,bmorris3/numpy,naritta/numpy,dato-code/numpy,b-carter/numpy,Srisai85/numpy,ChanderG/numpy,WillieMaddox/numpy,immerrr/numpy,BMJHayward/numpy,larsmans/numpy,skwbc/numpy,ViralLeadership/numpy,dch312/numpy,Dapid/numpy,KaelChen/numpy,anntzer/numpy,rmcgibbo/numpy,pizzathief/numpy,Linkid/numpy,moreati/numpy,MSeifert04/numpy,pyparallel/numpy,grlee77/numpy,SiccarPoint/numpy,simongibbons/numpy,jonathanunderwood/numpy,andsor/n
umpy,tdsmith/numpy,gfyoung/numpy,Linkid/numpy,AustereCuriosity/numpy,naritta/numpy,ChristopherHogan/numpy,dwf/numpy,astrofrog/numpy,felipebetancur/numpy,mhvk/numpy,jorisvandenbossche/numpy,numpy/numpy-refactor,endolith/numpy,empeeu/numpy,mathdd/numpy,ogrisel/numpy,numpy/numpy-refactor,gmcastil/numpy,GrimDerp/numpy,drasmuss/numpy,rherault-insa/numpy,ahaldane/numpy,felipebetancur/numpy,musically-ut/numpy,behzadnouri/numpy,ekalosak/numpy,rhythmsosad/numpy,solarjoe/numpy,MichaelAquilina/numpy,tdsmith/numpy,simongibbons/numpy,dwillmer/numpy,pizzathief/numpy | benchmarks/simpleindex.py | benchmarks/simpleindex.py | import timeit
# This is to show that NumPy is a poorer choice than nested Python lists
# if you are writing nested for loops.
# This is slower than Numeric was but Numeric was slower than Python lists were
# in the first place.
N = 30
code2 = r"""
for k in xrange(%d):
for l in xrange(%d):
res = a[k,l].item() + a[l,k].item()
""" % (N,N)
code3 = r"""
for k in xrange(%d):
for l in xrange(%d):
res = a[k][l] + a[l][k]
""" % (N,N)
code = r"""
for k in xrange(%d):
for l in xrange(%d):
res = a[k,l] + a[l,k]
""" % (N,N)
setup3 = r"""
import random
a = [[None for k in xrange(%d)] for l in xrange(%d)]
for k in xrange(%d):
for l in xrange(%d):
a[k][l] = random.random()
""" % (N,N,N,N)
t1 = timeit.Timer(code, 'import numpy as N; a = N.rand(%d,%d)' % (N,N))
t2 = timeit.Timer(code, 'import MLab as N; a=N.rand(%d,%d)' % (N,N))
t3 = timeit.Timer(code, 'import numarray.mlab as N; a=N.rand(%d,%d)' % (N,N))
t4 = timeit.Timer(code2, 'import numpy as N; a = N.rand(%d,%d)' % (N,N))
t5 = timeit.Timer(code3, setup3)
t6 = timeit.Timer("res = a + a.transpose()","import numpy as N; a=N.rand(%d,%d)" % (N,N))
print "shape = ", (N,N)
print "NumPy 1: ", t1.repeat(3,100)
print "NumPy 2: ", t4.repeat(3,100)
print "Numeric: ", t2.repeat(3,100)
print "Numarray: ", t3.repeat(3,100)
print "Python: ", t5.repeat(3,100)
print "Optimized: ", t6.repeat(3,100)
| bsd-3-clause | Python |
|
99061bec96a7337e6ddc1d698f00805f84089b3b | Set content headers on download | bepasty/bepasty-server,makefu/bepasty-server,makefu/bepasty-server,bepasty/bepasty-server,makefu/bepasty-server,bepasty/bepasty-server,bepasty/bepasty-server | bepasty/views/download.py | bepasty/views/download.py | # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
from flask import Response, current_app, stream_with_context
from flask.views import MethodView
from ..utils.name import ItemName
from . import blueprint
class DownloadView(MethodView):
def get(self, name):
n = ItemName.parse(name)
item = current_app.storage.open(n)
def stream():
try:
# Stream content from storage
offset = 0
size = item.data.size
while offset < size:
buf = item.data.read(16*1024, offset)
offset += len(buf)
yield buf
finally:
item.close()
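        # stream_with_context keeps the request context alive while the
        # generator above is being consumed.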
ret = Response(stream_with_context(stream()))
ret.headers['Content-Disposition'] = 'attachment; filename="{}"'.format(item.meta['filename'])
ret.headers['Content-Length'] = item.meta['size']
return ret
blueprint.add_url_rule('/<name>/+download', view_func=DownloadView.as_view('download'))
| # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
from flask import Response, current_app, stream_with_context
from flask.views import MethodView
from ..utils.name import ItemName
from . import blueprint
class DownloadView(MethodView):
def get(self, name):
n = ItemName.parse(name)
item = current_app.storage.open(n)
def stream():
try:
# Stream content from storage
offset = 0
size = item.data.size
while offset < size:
buf = item.data.read(16*1024, offset)
offset += len(buf)
yield buf
finally:
item.close()
return Response(stream_with_context(stream()))
blueprint.add_url_rule('/<name>/+download', view_func=DownloadView.as_view('download'))
| bsd-2-clause | Python |
5787d3ff813d2c96d0ec2c2fd90f91b93315e564 | Add stub for cliches | amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,jstewmon/proselint,jstewmon/proselint,amperser/proselint | proselint/checks/inprogress/wgd_cliches.py | proselint/checks/inprogress/wgd_cliches.py | """WGD101: Cliches.
---
layout: post
error_code: WGD101
source: write-good
source_url: https://github.com/btford/write-good
title: WGD101: Cliches
date: 2014-06-10 12:31:19
categories: writing
---
Cliches are cliche.
"""
def check(text):
error_code = "WGD101"
msg = "Cliche."
return [(1, 1, error_code, msg)]
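# Each result tuple is assumed to follow (line, column, error_code, message);
# the stub always reports position (1, 1) until real cliche matching lands.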
| bsd-3-clause | Python |
|
9068fd506811113c50886bf9c8f4094b7e1bd7a3 | Add stats.py from week 2. | UI-DataScience/summer2014 | hw3/stats.py | hw3/stats.py | #!/usr/bin/python
# Week 2 Problem 3. Simple statistics.
# Use Python 3 print() function, Python 3 integer division
from __future__ import print_function, division
def get_stats(input_list):
'''
Accepts a list of integers, and returns a tuple of four numbers:
minimum(int), maximum(int), mean(float), and median(float)
>>> get_stats([0, 1, 2, 3, 4])
(0, 4, 2.0, 2.0)
>>> get_stats([0, 1, 2, 3, 4, 5])
(0, 5, 2.5, 2.5)
>>> get_stats([0, 1, 2, 5])
(0, 5, 2.0, 1.5)
>>> get_stats([0, 1, 2, 4, 5])
(0, 5, 2.4, 2.0)
'''
# min() and max() are in the standard library
# you could also write
# minimum = sorted(input_list)[0]
# maximum = sorted(input_list)[-1]
minimum = min(input_list)
maximum = max(input_list)
# use the sum() function from the standard library to calculate mean
# this is equivalent to
# total = length = 0
# for i in input_list: total += i
# for i in input_list: length += 1
# mean = total / length
mean = sum(input_list) / len(input_list)
# calculate the median
# if the number of elements is even, we take the average of 2 middle numbers
# if the number of elements is odd, median is the middle element
    # note that we use floor division (//) so the index is an integer;
    # the element is cast to float so the median is always a float,
    # as the doctests above expect
    if len(input_list) % 2:
        median = float(input_list[(len(input_list) - 1) // 2])
else:
median = 0.5 * (input_list[(len(input_list) - 1) // 2] \
+ input_list[len(input_list) // 2])
# return a tuple of min, max, mean, median
return minimum, maximum, mean, median
if __name__ == '__main__':
# we will test our function with a list of integers from 0 to 50
my_list = range(0, 51)
# get_stats returns a tuple of min, max, mean, median of my_list
# print out min, max, mean, median on each line
print("Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f" % get_stats(my_list)) | mit | Python |
|
84f31dfa718a2f557b0058920037265331fd1a3f | Add missing merge migration | cslzchen/osf.io,mfraezz/osf.io,mattclark/osf.io,felliott/osf.io,saradbowman/osf.io,icereval/osf.io,pattisdr/osf.io,caseyrollins/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,mattclark/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,sloria/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,baylee-d/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,erinspace/osf.io,erinspace/osf.io,brianjgeiger/osf.io,felliott/osf.io,pattisdr/osf.io,felliott/osf.io,pattisdr/osf.io,adlius/osf.io,adlius/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,aaxelb/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,adlius/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,erinspace/osf.io,adlius/osf.io,felliott/osf.io | osf/migrations/0099_merge_20180427_1109.py | osf/migrations/0099_merge_20180427_1109.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-27 16:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0098_merge_20180416_1807'),
('osf', '0098_auto_20180418_1722'),
]
operations = [
]
| apache-2.0 | Python |
|
99d7a6dd79e0661bb047198261d624fd62e41406 | add missing file | markr622/moose,katyhuff/moose,jhbradley/moose,permcody/moose,tonkmr/moose,bwspenc/moose,tonkmr/moose,jinmm1992/moose,harterj/moose,idaholab/moose,cpritam/moose,raghavaggarwal/moose,jbair34/moose,markr622/moose,jiangwen84/moose,danielru/moose,backmari/moose,raghavaggarwal/moose,roystgnr/moose,giopastor/moose,sapitts/moose,idaholab/moose,harterj/moose,jhbradley/moose,backmari/moose,yipenggao/moose,jinmm1992/moose,zzyfisherman/moose,bwspenc/moose,cpritam/moose,yipenggao/moose,lindsayad/moose,friedmud/moose,dschwen/moose,sapitts/moose,adamLange/moose,cpritam/moose,jessecarterMOOSE/moose,shanestafford/moose,SudiptaBiswas/moose,shanestafford/moose,giopastor/moose,jbair34/moose,tonkmr/moose,giopastor/moose,bwspenc/moose,zzyfisherman/moose,xy515258/moose,jiangwen84/moose,raghavaggarwal/moose,jasondhales/moose,WilkAndy/moose,waxmanr/moose,sapitts/moose,yipenggao/moose,Chuban/moose,dschwen/moose,friedmud/moose,lindsayad/moose,shanestafford/moose,jhbradley/moose,roystgnr/moose,jiangwen84/moose,cpritam/moose,WilkAndy/moose,danielru/moose,giopastor/moose,markr622/moose,andrsd/moose,andrsd/moose,Chuban/moose,kasra83/moose,laagesen/moose,shanestafford/moose,milljm/moose,jessecarterMOOSE/moose,liuwenf/moose,lindsayad/moose,joshua-cogliati-inl/moose,WilkAndy/moose,jessecarterMOOSE/moose,Chuban/moose,laagesen/moose,waxmanr/moose,adamLange/moose,friedmud/moose,roystgnr/moose,jasondhales/moose,apc-llc/moose,mellis13/moose,capitalaslash/moose,backmari/moose,tonkmr/moose,roystgnr/moose,harterj/moose,roystgnr/moose,laagesen/moose,wgapl/moose,harterj/moose,SudiptaBiswas/moose,katyhuff/moose,mellis13/moose,liuwenf/moose,nuclear-wizard/moose,idaholab/moose,zzyfisherman/moose,dschwen/moose,jessecarterMOOSE/moose,jbair34/moose,jasondhales/moose,shanestafford/moose,capitalaslash/moose,roystgnr/moose,joshua-cogliati-inl/moose,nuclear-wizard/moose,sapitts/moose,milljm/moose,joshua-cogliati-inl/moose,tonkmr/moose,apc-llc/moose,andrsd/moose,katyhuff/moose,YaqiWang/moose,shanestafford/moose,yipenggao/moose,nuclear-wizard/moose,kasra83/moose,stimpsonsg/moose,stimpsonsg/moose,liuwenf/moose,friedmud/moose,cpritam/moose,jinmm1992/moose,danielru/moose,jasondhales/moose,YaqiWang/moose,markr622/moose,waxmanr/moose,SudiptaBiswas/moose,dschwen/moose,Chuban/moose,lindsayad/moose,xy515258/moose,dschwen/moose,harterj/moose,apc-llc/moose,kasra83/moose,milljm/moose,kasra83/moose,jinmm1992/moose,liuwenf/moose,idaholab/moose,WilkAndy/moose,liuwenf/moose,zzyfisherman/moose,capitalaslash/moose,WilkAndy/moose,mellis13/moose,permcody/moose,backmari/moose,nuclear-wizard/moose,apc-llc/moose,wgapl/moose,stimpsonsg/moose,xy515258/moose,wgapl/moose,bwspenc/moose,jiangwen84/moose,idaholab/moose,wgapl/moose,adamLange/moose,SudiptaBiswas/moose,mellis13/moose,danielru/moose,lindsayad/moose,milljm/moose,bwspenc/moose,cpritam/moose,YaqiWang/moose,permcody/moose,katyhuff/moose,joshua-cogliati-inl/moose,jhbradley/moose,WilkAndy/moose,roystgnr/moose,waxmanr/moose,permcody/moose,jessecarterMOOSE/moose,adamLange/moose,tonkmr/moose,laagesen/moose,xy515258/moose,andrsd/moose,YaqiWang/moose,jbair34/moose,liuwenf/moose,SudiptaBiswas/moose,milljm/moose,raghavaggarwal/moose,zzyfisherman/moose,laagesen/moose,andrsd/moose,zzyfisherman/moose,capitalaslash/moose,stimpsonsg/moose,sapitts/moose | gui/vtk/ExodusResult.py | gui/vtk/ExodusResult.py | import os, sys, PyQt4, getopt
from PyQt4 import QtCore, QtGui
import vtk
import time
class ExodusResult:
def __init__(self, render_widget, renderer, plane):
self.render_widget = render_widget
self.renderer = renderer
self.plane = plane
self.current_actors = []
def setFileName(self, file_name):
self.currently_has_actor = True
self.file_name = file_name
self.reader = vtk.vtkExodusIIReader()
self.reader.SetFileName(self.file_name)
self.reader.UpdateInformation()
self.current_dim = self.reader.GetDimensionality()
self.min_timestep = 0
self.max_timestep = 0
        timestep_range = self.reader.GetTimeStepRange()  # avoid shadowing the builtin range()
        self.min_timestep = timestep_range[0]
        self.max_timestep = timestep_range[1]
self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.ELEM_BLOCK, 1)
self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1)
self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL_TEMPORAL, 1)
self.reader.SetTimeStep(self.max_timestep)
self.reader.Update()
self.current_variable_point_data = {}
self.current_variables = []
self.current_nodal_components = {}
self.current_elemental_components = {}
self.component_index = -1
cdp = vtk.vtkCompositeDataPipeline()
vtk.vtkAlgorithm.SetDefaultExecutivePrototype(cdp)
self.output = self.reader.GetOutput()
self.geom = vtk.vtkCompositeDataGeometryFilter()
self.geom.SetInputConnection(0,self.reader.GetOutputPort(0))
self.geom.Update()
self.lut = vtk.vtkLookupTable()
self.lut.SetHueRange(0.667, 0.0)
self.lut.SetNumberOfColors(256)
self.lut.Build()
self.data = self.geom.GetOutput()
num_nodal_variables = self.data.GetPointData().GetNumberOfArrays()
for var_num in xrange(num_nodal_variables):
var_name = self.data.GetPointData().GetArrayName(var_num)
self.current_variables.append(var_name)
components = self.data.GetPointData().GetVectors(var_name).GetNumberOfComponents()
self.current_nodal_components[var_name] = components
# self.data.GetPointData().GetVectors(value_string).GetComponentName(0)
num_elemental_variables = self.data.GetCellData().GetNumberOfArrays()
for var_num in xrange(num_elemental_variables):
var_name = self.data.GetCellData().GetArrayName(var_num)
self.current_variables.append(var_name)
components = self.data.GetCellData().GetVectors(var_name).GetNumberOfComponents()
self.current_elemental_components[var_name] = components
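        # Map the full (unclipped) geometry through the lookup table and
        # attach it to the renderer.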
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInput(self.data)
self.mapper.ScalarVisibilityOn()
self.mapper.SetLookupTable(self.lut)
self.actor = vtk.vtkActor()
self.current_actors.append(self.actor)
self.actor.SetMapper(self.mapper)
self.renderer.AddActor(self.actor)
self.current_actor = self.actor
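        # Build a clipped copy of the dataset, cut by the plane passed to
        # __init__, with its own mapper and actor.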
self.clipper = vtk.vtkTableBasedClipDataSet()
self.clipper.SetInput(self.output)
self.clipper.SetClipFunction(self.plane)
self.clipper.Update()
self.clip_geom = vtk.vtkCompositeDataGeometryFilter()
self.clip_geom.SetInputConnection(0,self.clipper.GetOutputPort(0))
self.clip_geom.Update()
self.clip_data = self.clip_geom.GetOutput()
self.clip_mapper = vtk.vtkPolyDataMapper()
self.clip_mapper.SetInput(self.clip_data)
self.clip_mapper.ScalarVisibilityOn()
self.clip_mapper.SetLookupTable(self.lut)
self.clip_actor = vtk.vtkActor()
self.clip_actor.SetMapper(self.clip_mapper)
self.current_actors.append(self.clip_actor)
self.scalar_bar = vtk.vtkScalarBarActor()
self.current_actors.append(self.scalar_bar)
self.scalar_bar.SetLookupTable(self.mapper.GetLookupTable())
self.scalar_bar.SetNumberOfLabels(4)
self.current_bounds = self.actor.GetBounds()
| lgpl-2.1 | Python |
|
d6850ebe441a966dcf17f5cb8b0ce57a7c9dce8a | Add argument parsing | Relrin/Helenae,Relrin/Helenae,Relrin/Helenae | helenae/db/create_db.py | helenae/db/create_db.py | from optparse import OptionParser
import sqlalchemy.exc
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
from tables import *
def create_db():
"""
    Create the tables defined in tables.py in the configured database.
"""
try:
Base.metadata.create_all(engine)
except sqlalchemy.exc.InvalidRequestError:
print "SQLAlchemy ERROR: SQLAlchemy was asked to do something it can't do"
    except sqlalchemy.exc.DBAPIError, exc:
        print "SQLAlchemy ERROR: %s" % exc
    except sqlalchemy.exc.SQLAlchemyError, exc:
        print "SQLAlchemy ERROR: %s" % exc
def initialize_db():
"""
    Insert test data into the tables defined in tables.py.
"""
#insert test data
Session = sessionmaker(bind=engine)
session = Session()
test_dir = Catalog('test')
session.add(test_dir)
session.commit()
#test_file = File('test.txt', '123456.txt', hash('123456.txt'), 1024, 0, 1)
#test_file.server_id.append(test_server)
#session.add(test_file)
#session.commit()
test_fs = FileSpace('test')
session.add(test_fs)
session.commit()
test_acctype = AccountType('free', 0.00)
session.add(test_acctype)
session.commit()
test_group = Group('users', 1101)
session.add(test_group)
session.commit()
test_user = Users('relrin', 'Valery Savich', hash('123456'), 'some@mail.com', '01.01.2014', 1, 1, 1)
session.add(test_user)
session.commit()
session.close()
print "Insertion data has complete!"
print "Test query: Getting data from [Users] table\n"
connection = engine.connect()
result = engine.execute(text("select name, fullname, password from users"))
for row in result:
print "Users<name=%s, fullname=%s, password=%s>" % (row.name, row.fullname, row.password)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--crtdb", dest='cdb', help = "Create database", default=False)
parser.add_option("-i", "--initdb", dest = "idb", help = "Initialize DB: insert test data", default=False)
(options, args) = parser.parse_args()
options.cdb = bool(options.cdb)
options.idb = bool(options.idb)
if options.cdb:
create_db()
if options.idb:
initialize_db()
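# Example invocation (a sketch; with store_true the flags take no values):
#   python create_db.py --crtdb --initdb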
| mit | Python |
|
712733ead5e36362fe6e2eca1235744c257c7f69 | Create helloWorld.py | WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17 | helloWorld.py | helloWorld.py | # programe in python
printf("Hello World!")
| mit | Python |
|
bf56a5afed926d7cdd536c1da8ba5b021a09bd95 | Test pipe framework | jni/skan | skan/test/test_pipe.py | skan/test/test_pipe.py | import os
import pytest
import pandas
from skan import pipe
@pytest.fixture
def image_filename():
rundir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(rundir, 'data')
return os.path.join(datadir, 'retic.tif')
def test_pipe(image_filename):
data = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight')
    assert isinstance(data, pandas.DataFrame)
assert data.shape[0] > 0
| bsd-3-clause | Python |
|
b663bf77fe60a108598db4ae8310e8877d06cddd | Add unit tests for core module | jniedrauer/dfman | tests/core_test.py | tests/core_test.py | """Tests for the core module."""
import os
import sys
import tempfile
import unittest
from mock import mock_open, patch
from context import dfman
from dfman import config, const, core
class TestMainRuntime(unittest.TestCase):
@patch('dfman.core.Config')
@patch.object(dfman.core.MainRuntime, 'set_output_streams')
def test_run_initial_setup(self, _, mock_config):
mc_return = mock_config.return_value
# dry run and verbose are set to false with args
mc_return.getboolean.return_value = False
runtime = dfman.core.MainRuntime(False, False)
runtime.run_initial_setup()
self.assertFalse(runtime.dry_run)
self.assertFalse(runtime.verbose)
# verbose is set to true with config file but not with args
mc_return.getboolean.return_value = True
runtime.run_initial_setup()
self.assertTrue(runtime.verbose)
def test_get_distro(self):
test_os = \
b'''
NAME="Scary Linux"
ID=spooky
PRETTY_NAME="Spooky Scary Linux"
ANSI_COLOR="1;32"
'''
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(test_os)
tmp.seek(0)
runtime = dfman.core.MainRuntime(False, False)
const.SYSTEMD_DISTINFO = tmp.name
self.assertEqual(runtime.get_distro(), 'spooky')
def test_get_overrides(self):
test_config = \
b'''
[Overrides]
file1 = dir1/file1
file2 = dir2/file2
[spooky]
file2 = distoverride/file2
'''
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(test_config)
tmp.seek(0)
config = dfman.Config()
config.cfg_file = tmp.name
config.load_cfg()
runtime = dfman.core.MainRuntime(False, False)
runtime.config = config
runtime.distro = 'spooky'
overrides = runtime.get_overrides()
self.assertEqual(overrides['file1'], 'dir1/file1')
self.assertEqual(overrides['file2'], 'distoverride/file2')
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
be59230531d98dc25f806b2290a51a0f4fde1d3b | Rename model to prevent crash during module upgrade in tests | grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade | addons/survey/migrations/8.0.2.0/pre-migration.py | addons/survey/migrations/8.0.2.0/pre-migration.py | # coding: utf-8
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
openupgrade.rename_tables(cr, [('survey', 'survey_survey')])
openupgrade.rename_models(cr, [('survey', 'survey.survey')])
| agpl-3.0 | Python |
|
a277a25014c250c04fabb669013305940c867abc | Introduce new variables | openfisca/country-template,openfisca/country-template | openfisca_country_template/variables/stats.py | openfisca_country_template/variables/stats.py | # -*- coding: utf-8 -*-
# This file defines the variables of our legislation.
# A variable is a property of a person or an entity (e.g. a household).
# See http://openfisca.org/doc/variables.html
# Import from openfisca-core the common python objects used to code the legislation in OpenFisca
from openfisca_core.model_api import *
# Import the entities specifically defined for this tax and benefit system
from openfisca_country_template.entities import *
class total_benefits(Variable):
column = FloatCol
entity = Household
definition_period = MONTH
label = "Sum of the benefits perceived by a household"
reference = "https://stats.gov.example/benefits"
def formula(household, period, parameters):
basic_income_i = household.members('basic_income', period) # Calculates the value of basic_income for each member of the household
return (
+ household.sum(basic_income_i) # Sum the household members basic incomes
+ household('housing_allowance', period)
)
class total_taxes(Variable):
column = FloatCol
entity = Household
definition_period = MONTH
label = "Sum of the taxes paid by a household"
reference = "https://stats.gov.example/taxes"
def formula(household, period, parameters):
income_tax_i = household.members('income_tax', period)
social_security_contribution_i = household.members('social_security_contribution', period)
return (
+ household.sum(income_tax_i)
+ household.sum(social_security_contribution_i)
+ household('housing_tax', period.this_year) / 12
)
| agpl-3.0 | Python |
|
becba80983c5f0f29f981eadcc79d4f496e1d28b | fix issue #2778 | hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare | theme/management/commands/fix_user_quota_model.py | theme/management/commands/fix_user_quota_model.py | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from theme.models import UserQuota
class Command(BaseCommand):
help = "This commond can be run to fix the corrupt user data where some users do not " \
"have UserQuota foreign key relation. This management command can be run on a " \
"as-needed basis."
def handle(self, *args, **options):
users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
hs_internal_zone = "hydroshare"
for u in users:
uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
if not uq:
# create default UserQuota object for this user
                UserQuota.objects.create(user=u)  # create() saves the new row immediately
| bsd-3-clause | Python |
|
4f1cda8459cb6bca2e317bb582266fb43e78215c | Add test_manager_mixin module. | ulule/django-linguist | linguist/tests/test_manager_mixin.py | linguist/tests/test_manager_mixin.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import BaseTestCase
from ..models import Translation
from ..utils.i18n import get_cache_key
class ManagerMixinTest(BaseTestCase):
"""
Tests the Linguist's manager mixin.
"""
def setUp(self):
self.create_registry()
def test_set_instance_cache(self):
from ..mixins import set_instance_cache
translations = [self.translation_en, self.translation_fr]
set_instance_cache(self.instance, translations)
self.assertEqual(
self.instance.cached_translations_count,
Translation.objects.count())
def test_get_translation_lookups(self):
from ..mixins import get_translation_lookups
lookups = get_translation_lookups(self.instance)
self.assertEqual(lookups, {
'identifier': self.instance.identifier,
'object_id': self.instance.pk,
})
lookups = get_translation_lookups(self.instance, fields=['title', 'body'])
self.assertEqual(lookups, {
'identifier': self.instance.identifier,
'object_id': self.instance.pk,
'field_name__in': ['title', 'body'],
})
lookups = get_translation_lookups(self.instance, fields=['title'], languages=['en', 'fr'])
self.assertEqual(lookups, {
'identifier': self.instance.identifier,
'object_id': self.instance.pk,
'field_name__in': ['title'],
'language__in': ['en', 'fr'],
})
| mit | Python |
|
326249502d9884ea5717afff63b8a7caf60f6c2c | check in openstack healthcheck tool | zdw/xos,wathsalav/xos,cboling/xos,zdw/xos,opencord/xos,open-cloud/xos,xmaruto/mcord,open-cloud/xos,xmaruto/mcord,opencord/xos,cboling/xos,jermowery/xos,cboling/xos,xmaruto/mcord,wathsalav/xos,zdw/xos,cboling/xos,jermowery/xos,jermowery/xos,opencord/xos,cboling/xos,jermowery/xos,xmaruto/mcord,wathsalav/xos,open-cloud/xos,wathsalav/xos,zdw/xos | planetstack/tools/openstack-healthcheck.py | planetstack/tools/openstack-healthcheck.py | #! /usr/bin/python
import os
import sys
import subprocess
import time
def get_systemd_status(service):
    p = subprocess.Popen(["/bin/systemctl", "is-active", service], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
out = out.strip()
return out
libvirt_enabled = os.system("systemctl -q is-enabled libvirtd.service")==0
nova_compute_enabled = os.system("systemctl -q is-enabled openstack-nova-compute.service")==0
openvswitch_agent_enabled = os.system("systemctl -q is-enabled quantum-openvswitch-agent.service")==0
print "enabled:"
print " libvirtd=", libvirt_enabled
print " openstack-nova-compute=", nova_compute_enabled
print " quantum-openvswitch-agent=", openvswitch_agent_enabled
if (not libvirt_enabled) or (not nova_compute_enabled) or (not openvswitch_agent_enabled):
print "services are not enabled. exiting"
sys.exit(0)
libvirt_status = get_systemd_status("libvirtd.service")
nova_compute_status = get_systemd_status("openstack-nova-compute.service")
openvswitch_agent_status = get_systemd_status("quantum-openvswitch-agent.service")
print "status:"
print " libvirtd=", libvirt_status
print " openstack-nova-compute=", nova_compute_status
print " quantum-openvswitch-agent=", openvswitch_agent_status
if (libvirt_status=="failed") or (nova_compute_status=="failed") or (openvswitch_agent_status=="failed"):
print "services have failed. doing the big restart"
os.system("systemctl stop openstack-nova-compute.service")
os.system("systemctl stop quantum-openvswitch-agent.service")
os.system("systemctl stop libvirtd.service")
time.sleep(5)
os.system("systemctl start libvirtd.service")
time.sleep(5)
os.system("systemctl start quantum-openvswitch-agent.service")
time.sleep(5)
os.system("systemctl start openstack-nova-compute.service")
print "done"
| apache-2.0 | Python |
|
0e5e3deb8a8250429ee7a1603e017343f6c7e3bb | Create a Testing Suite | in-toto/layout-web-tool,in-toto/layout-web-tool,in-toto/layout-web-tool | tests/run_tests.py | tests/run_tests.py | from unittest import defaultTestLoader, TextTestRunner
import sys
suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2, buffer=True).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
| mit | Python |
|
ecac8bc83491c9cb2312cf2a1c477c53c4832b4d | Add minimal dead code elimination | Inaimathi/pykit,Inaimathi/pykit,flypy/pykit,ContinuumIO/pykit,ContinuumIO/pykit,flypy/pykit | pykit/transform/dce.py | pykit/transform/dce.py | # -*- coding: utf-8 -*-
"""
Dead code elimination.
"""
from pykit.analysis import loop_detection
effect_free = set([
'alloca', 'load', 'new_list', 'new_tuple', 'new_dict', 'new_set',
'new_struct', 'new_data', 'new_exc', 'phi', 'exc_setup', 'exc_catch',
'ptrload', 'ptrcast', 'ptr_isnull', 'getfield', 'getindex',
'add', 'sub', 'mul', 'div', 'mod', 'lshift', 'rshift', 'bitand', 'bitor',
'bitxor', 'invert', 'not_', 'uadd', 'usub', 'eq', 'noteq', 'lt', 'lte',
'gt', 'gte', 'is_', 'addressof',
])
def dce(func, env=None):
"""
Eliminate dead code.
TODO: Prune branches, dead loops
"""
for op in func.ops:
if op.opcode in effect_free and len(func.uses[op]) == 0:
op.delete()
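# For example, an 'add' whose result has no remaining uses is deleted, while
# a call or store is kept because it is not in the effect-free set.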
run = dce | bsd-3-clause | Python |
|
2fa7855de542bb5ecd303e26d1e9913687478589 | Set up test suite to ensure server admin routes are added. | sheagcraig/sal,sheagcraig/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal,salopensource/sal,salopensource/sal,salopensource/sal | server/tests/test_admin.py | server/tests/test_admin.py | """General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
        `is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
| apache-2.0 | Python |
|
38b12d0581e82ebb0e4fee8500bbd5d83d373afa | Create wikipedia-link-analysis-reducer.py | hardikvasa/hadoop-mapreduce-examples-python | wikipedia-link-analysis-reducer.py | wikipedia-link-analysis-reducer.py | mit | Python |
||
38f5c8534e3807d0485165017972adf47bd4aa2f | Create utils.py | duboviy/zca | utilities/utils.py | utilities/utils.py | from zope.interface import implements
from IOperation import IOperation
class Plus(object):
implements(IOperation)
def __call__(self, a, b):
return a + b
class Minus(object):
implements(IOperation)
def __call__(self, a, b):
return a - b
### alternative way to make a utility component: instead of the `implements()` class advice at class level, use the `classImplements()` function
# from zope.interface import classImplements
# classImplements(Host, IHost)
### also in Python 2.6 and later you can use class decorator @implementer(IFoo)
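### a sketch of the decorator form (assumes a zope.interface version that provides `implementer`):
# from zope.interface import implementer
#
# @implementer(IOperation)
# class Times(object):
#     def __call__(self, a, b):
#         return a * b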
| mit | Python |
|
7801f5a34fed9c50ebd0d426a69f875026da9602 | Create tutorial2.py | anoushkaalavilli/empty-app | tutorial2.py | tutorial2.py | mit | Python |
||
0ddac190019753d77b1ed78dcd49ad7370d666df | add some utils | rdeits/iris,rdeits/iris,rdeits/iris,rdeits/iris | python/irispy/utils.py | python/irispy/utils.py | import numpy as np
import irispy
def lcon_to_vert(A, b):
poly = irispy.Polyhedron(A.shape[1])
poly.setA(A)
poly.setB(b)
    V = np.vstack(poly.generatorPoints()).T
    return V
def sample_convex_polytope(A, b, nsamples):
poly = irispy.Polyhedron(A.shape[1])
poly.setA(A)
poly.setB(b)
generators = np.vstack(poly.generatorPoints())
lb = np.min(generators, axis=0)
ub = np.max(generators, axis=0)
n = 0
samples = np.zeros((len(lb), nsamples))
while n < nsamples:
z = np.random.uniform(lb, ub)
if np.all(poly.A.dot(z) <= poly.b):
samples[:,n] = z
n += 1
return samples | bsd-2-clause | Python |
|
538cd00a3c0307818cf62c61be3d91007a9b4091 | Add migration for movie.durations_in_s | streamr/marvin,streamr/marvin,streamr/marvin | migrations/versions/349d38252295_.py | migrations/versions/349d38252295_.py | """Add movie.duration_in_s
Revision ID: 349d38252295
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 15:31:24.597000
"""
# revision identifiers, used by Alembic.
revision = '349d38252295'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('duration_in_s', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'duration_in_s')
### end Alembic commands ###
| mit | Python |
|
c8ad60f23bc630ba8e57f735c8aa0ec7eeaa3c1f | teste ggj18 | jrbitt/gamesresearch,jrbitt/gamesresearch | arquivo3.py | arquivo3.py | dasdsa
sdas
sdasd
asdasdas
s
dasdas
das
d
asd
as
das
das
das
d
sad
| apache-2.0 | Python |
|
c5bbbe4f6430ef20da55ea0f8039091d4f79c491 | Add script to update taking for all team owners | gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com | sql/branch.py | sql/branch.py | import sys
from gratipay import wireup
from gratipay.models.participant import Participant
db = wireup.db(wireup.env())
teams = db.all("""
SELECT t.*::teams
FROM teams t
""")
for team in teams:
print("Updating team %s" % team.slug)
Participant.from_username(team.owner).update_taking()
print("Done!")
| mit | Python |
|
74c58436c28fbca804cd70a88ca1250ca22aa8e6 | add test_poll.py | alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl | tests/unit/concurrently/condor/test_poll.py | tests/unit/concurrently/condor/test_poll.py | # Tai Sakuma <tai.sakuma@gmail.com>
import os
import sys
import logging
import textwrap
import collections
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import WorkingArea
from alphatwirl.concurrently import HTCondorJobSubmitter
##__________________________________________________________________||
@pytest.fixture()
def mock_proc_condor_q():
ret = mock.Mock()
ret.returncode = 0
return ret
@pytest.fixture()
def mock_pipe(monkeypatch):
ret = mock.Mock()
module = sys.modules['alphatwirl.concurrently.exec_util']
monkeypatch.setattr(module.subprocess, 'PIPE', ret)
return ret
@pytest.fixture()
def mock_popen(monkeypatch, mock_proc_condor_q):
ret = mock.Mock()
ret.side_effect = [mock_proc_condor_q]
module = sys.modules['alphatwirl.concurrently.exec_util']
monkeypatch.setattr(module.subprocess, 'Popen', ret)
return ret
@pytest.fixture()
def obj(mock_popen):
return HTCondorJobSubmitter()
##__________________________________________________________________||
def test_poll(
obj, mock_popen, mock_pipe,
mock_proc_condor_q, caplog):
obj.clusterprocids_outstanding = ['3764857.0', '3764858.0', '3764858.1', '3764858.2']
stdout = '\n'.join(['3764857.0 2', '3764858.1 2', '3764858.2 1'])
mock_proc_condor_q.communicate.return_value = (stdout, '')
with caplog.at_level(logging.DEBUG):
ret = obj.poll()
# assert 6 == len(caplog.records)
#
assert ['3764857.0', '3764858.1', '3764858.2'] == obj.clusterprocids_outstanding
#
expected = ['3764858.0']
assert expected == ret
#
expected = [
['condor_q', '3764857', '3764858', '-format', '%d.', 'ClusterId', '-format', '%d ', 'ProcId', '-format', '%-2s\n', 'JobStatus']
]
procargs_list = [args[0] for args, kwargs in mock_popen.call_args_list]
assert expected == procargs_list
##__________________________________________________________________||
| bsd-3-clause | Python |
|
9967ade200639b584e379ec25030d1598071ffd3 | Create TextEditor.py | BrickText/BrickText | redactor/TextEditor.py | redactor/TextEditor.py | from tkinter import *
from tkinter import font  # the font submodule is not pulled in by the star import
class TextEditor():
def __init__(self):
self.root = Tk()
self.root.wm_title("BrickText")
self.text_panel = Text(self.root)
self.text_panel.pack(side=RIGHT, fill=BOTH, expand=YES)
self.set_tabs()
def start(self):
self.root.mainloop()
def get_root(self):
return self.root
def get_text_panel(self):
return self.text_panel
def set_tabs(self):
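        # Measure the width of three spaces in the panel's current font and use
        # it as the Tab stop, so a Tab renders three spaces wide.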
f = font.Font(font=self.text_panel['font'])
tab_width = f.measure(' ' * 3)
self.text_panel.config(tabs=(tab_width,))
| mit | Python |
|
c037412566b0a0313216e49168a8ebcc831e0f9b | add hamshahri information extractor | sobhe/baaz,sobhe/baaz | hamshahri.py | hamshahri.py |
from hazm import sent_tokenize, word_tokenize, Normalizer, HamshahriReader, POSTagger, DependencyParser
from InformationExtractor import InformationExtractor
hamshahri = HamshahriReader('/home/alireza/Corpora/Hamshahri')
normalizer = Normalizer()
tagger = POSTagger()
parser = DependencyParser(tagger=tagger)
extractor = InformationExtractor()
output = open('informations.txt', 'w')
for text in hamshahri.texts():
text = normalizer.normalize(text)
sentences = [word_tokenize(sentence) for sentence in sent_tokenize(text)]
tagged = tagger.batch_tag(sentences)
parsed = parser.tagged_batch_parse(tagged)
for sentence in parsed:
print('\n', '*', *[node['word'] for node in sentence.nodelist if node['word']], file=output)
for information in extractor.extract(sentence):
print(*information, sep=' - ', file=output)
break
| mit | Python |
|
a7ece57eec28c771bcf2a23dc9c9e575223b1383 | add memory usage profiler script | kurtdawg24/robotframework,nmrao/robotframework,userzimmermann/robotframework,fingeronthebutton/robotframework,rwarren14/robotframework,kurtdawg24/robotframework,xiaokeng/robotframework,suvarnaraju/robotframework,rwarren14/robotframework,jorik041/robotframework,joongh/robotframework,yahman72/robotframework,wojciechtanski/robotframework,dkentw/robotframework,wojciechtanski/robotframework,yahman72/robotframework,userzimmermann/robotframework,ChrisHirsch/robotframework,jorik041/robotframework,wojciechtanski/robotframework,wojciechtanski/robotframework,edbrannin/robotframework,kyle1986/robortframe,edbrannin/robotframework,xiaokeng/robotframework,ChrisHirsch/robotframework,suvarnaraju/robotframework,JackNokia/robotframework,snyderr/robotframework,jaloren/robotframework,eric-stanley/robotframework,SivagnanamCiena/robotframework,kyle1986/robortframe,HelioGuilherme66/robotframework,nmrao/robotframework,joongh/robotframework,kurtdawg24/robotframework,fingeronthebutton/robotframework,dkentw/robotframework,ashishdeshpande/robotframework,yonglehou/robotframework,yonglehou/robotframework,snyderr/robotframework,robotframework/robotframework,edbrannin/robotframework,fingeronthebutton/robotframework,yonglehou/robotframework,xiaokeng/robotframework,yahman72/robotframework,moto-timo/robotframework,ashishdeshpande/robotframework,un33k/robotframework,JackNokia/robotframework,snyderr/robotframework,Colorfulstan/robotframework,ChrisHirsch/robotframework,fingeronthebutton/robotframework,wojciechtanski/robotframework,jaloren/robotframework,stasiek/robotframework,nmrao/robotframework,alexandrul-ci/robotframework,yonglehou/robotframework,xiaokeng/robotframework,alexandrul-ci/robotframework,stasiek/robotframework,JackNokia/robotframework,synsun/robotframework,HelioGuilherme66/robotframework,dkentw/robotframework,moto-timo/robotframework,snyderr/robotframework,Colorfulstan/robotframework,edbrannin/robotframework,eric-stanley/robotframework,SivagnanamCiena/robotframework,moto-timo/robotframework,kyle1986/robortframe,moto-timo/robotframework,stasiek/robotframework,Colorfulstan/robotframework,un33k/robotframework,ChrisHirsch/robotframework,un33k/robotframework,jaloren/robotframework,HelioGuilherme66/robotframework,un33k/robotframework,Colorfulstan/robotframework,robotframework/robotframework,nmrao/robotframework,synsun/robotframework,ChrisHirsch/robotframework,JackNokia/robotframework,moto-timo/robotframework,joongh/robotframework,ashishdeshpande/robotframework,synsun/robotframework,rwarren14/robotframework,synsun/robotframework,userzimmermann/robotframework,userzimmermann/robotframework,Colorfulstan/robotframework,stasiek/robotframework,ashishdeshpande/robotframework,SivagnanamCiena/robotframework,dkentw/robotframework,eric-stanley/robotframework,jorik041/robotframework,nmrao/robotframework,robotframework/robotframework,dkentw/robotframework,jaloren/robotframework,alexandrul-ci/robotframework,joongh/robotframework,kyle1986/robortframe,eric-stanley/robotframework,yahman72/robotframework,stasiek/robotframework,kyle1986/robortframe,suvarnaraju/robotframework,fingeronthebutton/robotframework,ashishdeshpande/robotframework,rwarren14/robotframework,synsun/robotframework,xiaokeng/robotframework,jorik041/robotframework,jorik041/robotframework,kurtdawg24/robotframework,edbrannin/robotframework,jaloren/robotframework,JackNokia/robotframework,suvarnaraju/robotframework,kurtdawg24/robotframework,userzimmermann/robotframework,alexandrul-ci/robotframework,yo
nglehou/robotframework,SivagnanamCiena/robotframework,un33k/robotframework,yahman72/robotframework,rwarren14/robotframework,joongh/robotframework,snyderr/robotframework,SivagnanamCiena/robotframework,alexandrul-ci/robotframework,suvarnaraju/robotframework | proto/memory_test/calculate_rebot_model.py | proto/memory_test/calculate_rebot_model.py | # Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from robot.result.builders import ResultFromXML
try:
import psutil
import objgraph
except ImportError:
print """
Please install psutil and objgraph - this script does not work without them.
"""
raise
def calculate_rebot_model(output_path):
xml = ResultFromXML(output_path)
p = psutil.Process(os.getpid())
print 'Process memory usage after xml parsing %f M' % (float(p.get_memory_info().rss) / (1024**2))
print 'Most common types'
objgraph.show_most_common_types()
return xml
if __name__ == '__main__':
if len(sys.argv) < 2:
print """
Simple memory profiler for robot output xml parsing.
Calculates memory usages after result model has been created.
usage:
calculate_rebot_model.py [PATH_TO_OUTPUT_XML]
"""
else:
calculate_rebot_model(sys.argv[1])
| apache-2.0 | Python |
|
680fb0bc3190acbb0bfd32f937d0e29b5641a1f2 | Create Strongly_Connect_Graph.py | UmassJin/Leetcode | Algorithm/Strongly_Connect_Graph.py | Algorithm/Strongly_Connect_Graph.py | # http://www.geeksforgeeks.org/strongly-connected-components/
# http://www.geeksforgeeks.org/connectivity-in-a-directed-graph/
'''
Given a directed graph, find out whether the graph is strongly connected or not. A directed graph is strongly connected if there is a path between any two pair of vertices. For example, following is a strongly connected graph.
For an undirected graph, checking connectivity is simple: run one traversal and
check whether every node was visited. This does not work for a directed graph.
In the example graph above, a search starting from node 0 reaches every node,
yet the graph is clearly not strongly connected.
The naive method runs a DFS from every node; if any start node fails to reach
every other node, the graph is not strongly connected. This takes O(V*(V+E)).
Alternatively, the Floyd Warshall algorithm finds shortest paths between every
pair of nodes, at O(V^3) complexity.
A better approach is the Strongly Connected Components (SCC) algorithm: all
SCCs of a graph can be found in O(V+E) time. If there is exactly one SCC, the
graph is strongly connected.
Using Kosaraju's algorithm, with two DFS passes:
1. Create a visited array and mark every vertex as unvisited.
2. Run a DFS from an arbitrary vertex V; if it does not visit every vertex,
   return false.
3. Reverse all edges.
4. Mark every vertex of the reversed graph as unvisited again.
5. Run a DFS on the new graph, starting from the same vertex V as in step 2.
   If this DFS does not visit every vertex, return false; otherwise return true.
'''
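
# A minimal sketch of the Kosaraju-style check described above. It assumes the
# graph is an adjacency dict {vertex: [neighbors]} with every vertex present as
# a key; the function names are illustrative, not part of the original note.
def _reachable(graph, start):
    visited, stack = set(), [start]
    while stack:
        v = stack.pop()
        if v not in visited:
            visited.add(v)
            stack.extend(graph[v])
    return visited

def is_strongly_connected(graph):
    if not graph:
        return True
    start = next(iter(graph))
    # Pass 1: every vertex must be reachable from an arbitrary start vertex.
    if len(_reachable(graph, start)) != len(graph):
        return False
    # Reverse every edge and repeat the same check from the same vertex.
    rev = {v: [] for v in graph}
    for v, neighbors in graph.items():
        for w in neighbors:
            rev[w].append(v)
    return len(_reachable(rev, start)) == len(rev)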
| mit | Python |
|
2e985972aa4aad94bfda25ba852326b39498e4fa | Create Unique_Binary_Search_Trees.py | UmassJin/Leetcode | Array/Unique_Binary_Search_Trees.py | Array/Unique_Binary_Search_Trees.py | Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
For example,
Given n = 3, there are a total of 5 unique BST's.
   1         3     3      2      1
    \       /     /      / \      \
     3     2     1      1   3      2
    /     /       \                 \
   2     1         2                 3
class Solution:
# @return an integer
# Recursion (172ms)
def numTrees_1(self, n):
if n <= 1: return 1
result = 0
for i in xrange(1,n+1):
            result += self.numTrees_1(i-1)*self.numTrees_1(n-i)
return result
# DP (46ms)
def numTrees(self, n):
result = [0 for i in xrange(n+1)]
result[0] = 1; result[1] = 1
for i in xrange(2, n+1):
        for j in xrange(1, i+1):
result[i] += result[j-1]*result[i-j]
return result[n]
    # state: result[i] = the number of unique BSTs for a sequence of length i.
    # initialize: result[0] = 1; result[1] = 1, since there is only one way to
    #             build a BST out of an empty or single-element sequence.
    # recurrence:
    #   result[n] = F(1,n) + F(2,n) + ... + F(n,n)
    #   F(i,n): the number of unique BSTs where the number i is the root and
    #           the sequence ranges from 1 to n.
    #   F(i,n) = result[i-1] * result[n-i],  1 <= i <= n
    #   result[n] = result[0]*result[n-1] + result[1]*result[n-2] + ... + result[n-1]*result[0]
    # answer: result[n]
| mit | Python |
|
cd59d45813fbc23d76e1e9d12cf46d7df37d72c3 | Add remote_fs unittest (#410) | appium/python-client,appium/python-client | test/unit/webdriver/device/remote_fs_test.py | test/unit/webdriver/device/remote_fs_test.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.common.helper import appium_bytes
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import (
android_w3c_driver,
appium_command,
get_httpretty_request_body
)
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(appium_bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}'
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(appium_bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}'
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
| apache-2.0 | Python |
|
f23c77d517dd88c38d5ad8fa0601bc61ccf17aa6 | Change url from 2016 to 2017 | pyvec/cz.pycon.org-2017,benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2017,benabraham/cz.pycon.org-2017,benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2017 | pyconcz_2017/urls.py | pyconcz_2017/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from pyconcz_2017.common.views import homepage
prefixed_urlpatterns = [
url(r'^$', homepage, name='homepage'),
url(r'^announcements/', include('pyconcz_2017.announcements.urls')),
url(r'^proposals/workshops/$', RedirectView.as_view(url='/2017/proposals/talks')),
url(r'^proposals/', include('pyconcz_2017.proposals.urls')),
url(r'^about/team/', include('pyconcz_2017.team.urls')),
url(r'^speakers/', include('pyconcz_2017.speakers.urls')),
url(r'^sponsors/', include('pyconcz_2017.sponsors.urls')),
# static pages
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name='about'),
url(r'^about/code/$',
TemplateView.as_view(template_name='pages/code.html'),
name='about_code'),
url(r'^about/transparency_report/$',
TemplateView.as_view(template_name='pages/transparency.html'),
name='about_transparency'),
url(r'^about/brno/$',
TemplateView.as_view(template_name='pages/brno.html'),
name='about_brno'),
]
urlpatterns = (
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) +
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
[
url(r'^2017/', include(prefixed_urlpatterns)),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', RedirectView.as_view(url='/2017/')),
]
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from pyconcz_2017.common.views import homepage
prefixed_urlpatterns = [
url(r'^$', homepage, name='homepage'),
url(r'^announcements/', include('pyconcz_2017.announcements.urls')),
url(r'^proposals/workshops/$', RedirectView.as_view(url='/2016/proposals/talks')),
url(r'^proposals/', include('pyconcz_2017.proposals.urls')),
url(r'^about/team/', include('pyconcz_2017.team.urls')),
url(r'^speakers/', include('pyconcz_2017.speakers.urls')),
url(r'^sponsors/', include('pyconcz_2017.sponsors.urls')),
# static pages
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name='about'),
url(r'^about/code/$',
TemplateView.as_view(template_name='pages/code.html'),
name='about_code'),
url(r'^about/transparency_report/$',
TemplateView.as_view(template_name='pages/transparency.html'),
name='about_transparency'),
url(r'^about/brno/$',
TemplateView.as_view(template_name='pages/brno.html'),
name='about_brno'),
]
urlpatterns = (
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) +
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
[
url(r'^2016/', include(prefixed_urlpatterns)),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', RedirectView.as_view(url='/2016/')),
]
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| mit | Python |
5becb57514c4b08fc7af2a9a4e38b2c8aac2f576 | Create computestats.py | Effective-Quadratures/Effective-Quadratures,psesh/Effective-Quadratures | effective_quadratures/computestats.py | effective_quadratures/computestats.py | #!/usr/bin/env python
import numpy as np
from utils import error_function
class Statistics(object):
"""
This subclass is an domains.ActiveVariableMap specifically for optimization.
**See Also**
optimizers.BoundedMinVariableMap
optimizers.UnboundedMinVariableMap
**Notes**
This class's train function fits a global quadratic surrogate model to the
n+2 active variables---two more than the dimension of the active subspace.
This quadratic surrogate is used to map points in the space of active
variables back to the simulation parameter space for minimization.
"""
# constructor
def __init__(self, coefficients, index_set):
self.coefficients = coefficients
self.index_set = index_set
def getMean(self):
"""
Train the global quadratic for the regularization.
:param ndarray Y: N-by-n matrix of points in the space of active
variables.
:param int N: merely there satisfy the interface of `regularize_z`. It
should not be anything other than 1.
:return: Z, N-by-(m-n)-by-1 matrix that contains a value of the inactive
variables for each value of the inactive variables.
:rtype: ndarray
**Notes**
In contrast to the `regularize_z` in BoundedActiveVariableMap and
UnboundedActiveVariableMap, this implementation of `regularize_z` uses
a quadratic program to find a single value of the inactive variables
for each value of the active variables.
"""
coefficients = self.coefficients
mean = coefficients[0,0]
return mean
def getVariance(self):
coefficients = self.coefficients
m, n = coefficients.shape
if m > n:
coefficients = coefficients.T
        variance = np.sum(coefficients[0][1:]**2)
return variance
# Function that computes first order Sobol' indices
def getFirstOrderSobol(self):
coefficients = self.coefficients
m, n = coefficients.shape
if m > n:
coefficients = coefficients.T
index_set = self.index_set
# Allocate memory!
index_set = index_set.getIndexSet()
index_set = np.mat(index_set)
m, dimensions = index_set.shape
variance = self.getVariance()
if dimensions == 1:
            error_function('ERROR: Sobol indices can only be computed for parameter studies with more than one parameter')
else:
index_set_entries = m
local_variance = np.zeros((index_set_entries, dimensions))
first_order_sobol_indices = np.zeros((dimensions))
# Loop for computing marginal variances!
for j in range(0, dimensions):
for i in range(0, index_set_entries): # no. of rows
# If the index_set[0,j] is not zero but the remaining are...
remaining_indices = np.arange(0, dimensions)
remaining_indices = np.delete(remaining_indices, j)
if(index_set[i,j] != 0 and np.sum(index_set[i, remaining_indices] ) == 0):
local_variance[i, j] = coefficients[0][i]
# Now take the sum of the squares of all the columns
for j in range(0, dimensions):
first_order_sobol_indices[j] = (np.sum(local_variance[:,j]**2))/(variance)
return first_order_sobol_indices
| lgpl-2.1 | Python |
|
36d0fc3c54dc0c91196c16875c1b1e2d9b0d38ea | Add basic unit test for LimitOffsetPagination | martinmaillard/django-rest-framework-json-api,abdulhaq-e/django-rest-framework-json-api,django-json-api/django-rest-framework-json-api,pombredanne/django-rest-framework-json-api,scottfisk/django-rest-framework-json-api,django-json-api/rest_framework_ember,django-json-api/django-rest-framework-json-api,leo-naeka/rest_framework_ember,leo-naeka/django-rest-framework-json-api,Instawork/django-rest-framework-json-api | example/tests/unit/test_pagination.py | example/tests/unit/test_pagination.py | from collections import OrderedDict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework.utils.urls import replace_query_param
from rest_framework_json_api.pagination import LimitOffsetPagination
factory = APIRequestFactory()
class TestLimitOffset:
"""
Unit tests for `pagination.LimitOffsetPagination`.
"""
def setup(self):
class ExamplePagination(LimitOffsetPagination):
default_limit = 10
max_limit = 15
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
self.base_url = 'http://testserver/'
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_test_request(self, arguments):
return Request(factory.get('/', arguments))
def test_valid_offset_limit(self):
"""
Basic test, assumes offset and limit are given.
"""
offset = 10
limit = 5
count = len(self.queryset)
last_offset = count - limit
next_offset = 15
prev_offset = 5
request = self.get_test_request({
self.pagination.limit_query_param: limit,
self.pagination.offset_query_param: offset
})
base_url = replace_query_param(self.base_url, self.pagination.limit_query_param, limit)
last_url = replace_query_param(base_url, self.pagination.offset_query_param, last_offset)
first_url = base_url
next_url = replace_query_param(base_url, self.pagination.offset_query_param, next_offset)
prev_url = replace_query_param(base_url, self.pagination.offset_query_param, prev_offset)
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_offset = offset + limit
expected_content = {
'results': list(range(offset + 1, next_offset + 1)),
'links': OrderedDict([
('first', first_url),
('last', last_url),
('next', next_url),
('prev', prev_url),
]),
'meta': {
'pagination': OrderedDict([
('count', count),
('limit', limit),
('offset', offset),
])
}
}
assert queryset == list(range(offset + 1, next_offset + 1))
assert content == expected_content
| bsd-2-clause | Python |
|
1eed076cc9140d35cd6897ef2bcb5fe0ae943e35 | Revert "remove bindings" | curioussavage/my-usage,arunoda/node-usage,curioussavage/my-usage,arunoda/node-usage,Discountrobot/node-usage,Wyliodrin/node-usage,Wyliodrin/node-usage,curioussavage/my-usage,Discountrobot/node-usage,Discountrobot/node-usage,Discountrobot/node-usage,arunoda/node-usage,Wyliodrin/node-usage,curioussavage/my-usage,arunoda/node-usage,Wyliodrin/node-usage,curioussavage/my-usage,arunoda/node-usage,Wyliodrin/node-usage | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'sysinfo',
'conditions': [
['OS=="solaris"', {
'sources': [
'src/solaris.cpp'
]
}]
],
'sources': [
'src/binding.cpp',
],
'linkflags': [
        '-Lbuild/Release/obj.target/sysinfo/src/'
],
'defines': [
'OS="<(OS)"',
'is_<(OS)'
],
}
]
}
| mit | Python |
|
9a83e01b9710943c50f80c8ffc4e5d5827cb3b92 | Check data preparation | shawpan/vehicle-detector | main.py | main.py | from car_classifier import CarClassifier
if __name__ == "__main__":
car_img_dir = 'vehicles'
not_car_img_dir = 'non-vehicles'
sample_size = 8792
car_classifier = CarClassifier(car_img_dir=car_img_dir,
not_car_img_dir=not_car_img_dir,
sample_size = sample_size)
car_classifier.fit()
| mit | Python |
|
c99bee3628e55873e5bb9b6e98fd0455b6b45c64 | add examples for recipe 1.14 | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py | def example_1():
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users = [User(23), User(3), User(99)]
print(users)
print(sorted(users, key = lambda u: u.user_id))
from operator import attrgetter
print(sorted(users, key = attrgetter('user_id')))
print(min(users, key = attrgetter('user_id')))
print(max(users, key = attrgetter('user_id')))
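
    # attrgetter also accepts several attribute names for multi-level sorts,
    # e.g. attrgetter('last_name', 'first_name') on objects that define those
    # fields (hypothetical attributes here, shown only to illustrate the call).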
if __name__ == '__main__':
example_1()
| mit | Python |
|
836d4ed6a3ddda4d381345a34358714db74af757 | Add an helper push program | ivoire/ReactOBus,ivoire/ReactOBus | share/examples/push.py | share/examples/push.py | import sys
import zmq
from zmq.utils.strtypes import b
def main():
# Get the arguments
if len(sys.argv) != 4:
print("Usage: push.py url topic num_messages")
sys.exit(1)
url = sys.argv[1]
topic = sys.argv[2]
num_messages = int(sys.argv[3])
# Create the socket
context = zmq.Context()
sock = context.socket(zmq.PUSH)
sock.connect(url)
for i in range(0, num_messages):
sock.send_multipart([b(topic), b("id"), b(str(i))])
if __name__ == "__main__":
main()
| agpl-3.0 | Python |
|
cc7ecc419f75fa672ff215e7c6157bac8ebfb29e | Add union-find implementation | nitsas/py3datastructs | unionfind.py | unionfind.py | """
A simple Union-Find data structure implementation.
author:
Christos Nitsas
(chrisn654 or nitsas)
language:
Python 3(.4)
date:
July, 2014
"""
class UnionFindSimpleImpl:
"""
A simple Union-Find data structure implementation.
If n is the number of items in the structure, a series of m union
operations will take O(m * log(n)) time. Find operations are (amortized)
constant time (O(1)) though.
"""
def __init__(self, items):
"""Initialize the Union-Find structure from an iterable."""
self._items = set(items)
self._leader = dict()
self._followers = dict()
self._cluster_size = dict()
for item in self._items:
self._leader[item] = item
self._followers[item] = [item]
self._cluster_size[item] = 1
def __getitem__(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.find().
"""
return self._leader[item]
def find(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.__getitem__().
"""
return self[item]
def union(self, leader_A, leader_B):
"""
Joins together the two clusters that items leader_A
and leader_B represent.
"""
if leader_A == leader_B:
return
if self._cluster_size[leader_B] > self._cluster_size[leader_A]:
leader_A, leader_B = leader_B, leader_A
for follower in self._followers[leader_B]:
self._leader[follower] = leader_A
self._followers[leader_A].extend(self._followers[leader_B])
del(self._followers[leader_B])
self._cluster_size[leader_A] += self._cluster_size[leader_B]
del(self._cluster_size[leader_B])
def num_clusters(self):
"""Returns the current number of clusters."""
return len(self._cluster_size)
def items(self):
"""Returns a set containing all the items in the structure."""
return self._items
class UnionFindUnionByRankAndPathCompression:
"""
A faster Union-Find implementation with lazy unions (using union by
rank) and path compression.
A series of m union & find operations on a structure with n items
will need time O(m * a(n)), where a(n) is the reverse Ackerman
function.
"""
def __init__(self, items):
"""Initialize the Union-Find structure from an iterable."""
raise(NotImplementedError)
def __getitem__(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.find().
"""
raise(NotImplementedError)
def find(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.__getitem__().
"""
raise(NotImplementedError)
def union(self, leader_A, leader_B):
"""
Joins together the two clusters that items leader_A
and leader_B represent.
"""
raise(NotImplementedError)
def num_clusters(self):
"""Returns the current number of clusters."""
raise(NotImplementedError)
def items(self):
"""Returns a set containing all the items in the structure."""
raise(NotImplementedError)
_default_impl = UnionFindSimpleImpl
class UnionFindStructure:
"""
A Union-Find data structure interface.
It relies on a concrete Union-Find implementation such as
UnionFindSimpleImpl or UnionFindLazyUnionsAndPathCompressionImpl.
"""
def __init__(self, items, *, impl=_default_impl):
self._impl = impl(items)
def __getitem__(self, item):
return self._impl.__getitem__(item)
def __getattr__(self, name):
return getattr(self._impl, name)
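
# A quick usage sketch (illustrative; the values and calls below are not part
# of the original module):
if __name__ == '__main__':
    uf = UnionFindStructure(['a', 'b', 'c', 'd'])
    uf.union(uf.find('a'), uf.find('b'))  # union expects cluster leaders
    uf.union(uf.find('b'), uf.find('c'))
    assert uf.find('a') == uf.find('c')
    assert uf.num_clusters() == 2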
| mit | Python |
|
a4924a6928facdda942844b1bac8f0a53eb9ff4b | add 1 OPP file: slots | wuchengang/PythonLearing,wuchengang/PythonLearing | use_slots.py | use_slots.py | #!/user/bin/env python3
# -*- coding: utf-8 -*-
class Student(object):
    __slots__ = ('name', 'age')
class GraduateStudent(Student):
pass
s = Student()
s.name = 'Michael'
s.age = 15
try:
s.score = 88
except AttributeError as e:
print('AttributeError:', e)
g = GraduateStudent()
g.score = 99
print(g.score)
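
# Note: the __slots__ restriction is not inherited. GraduateStudent defines no
# __slots__ of its own, so its instances still get a __dict__ and the dynamic
# attribute assignment above is allowed.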
| apache-2.0 | Python |
|
b6b2f268693764deb70553b00904af4aa6def15f | add lamp_genie.py - aladin 오프라인 매장을 검색해서 키워드의 책 ISBN번호를 알려준다. | PyLadiesSeoul/LampGenie | lamp_genie.py | lamp_genie.py | #-*- coding: utf-8 -*-
import requests
import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
mobile_site_url = "http://www.aladin.co.kr"
search_url = "http://off.aladin.co.kr/usedstore/wsearchresult.aspx?SearchWord=%s&x=0&y=0"
book_url = "http://off.aladin.co.kr/usedstore/wproduct.aspx?ISBN=%d"
response = requests.get(mobile_site_url + '/m/off/gate.aspx?')
content = response.content
search_text = requests.utils.quote(raw_input("검색할 책 제목이나 글쓴이 : ").encode('cp949'))
shop_list = BeautifulSoup.BeautifulSoup(content).findAll('td')
s = requests.Session()
for x in shop_list:
print "=" * 50
try:
shop_location = x.text
url = x.find('a')
response = s.get(mobile_site_url + url['href'])
url = search_url % search_text
print url
response = s.get(url)
content = response.content
result = BeautifulSoup.BeautifulSoup(content).find('div', {'id':'Search3_Result'})
try:
result_list = set()
for x in result.findAll('a'):
search_code = str(x).split('ISBN=')
                if len(search_code) > 1:
isbn = search_code[1].split('"')[0]
result_list.add(isbn)
print shop_location, result_list
except:
print set()
except Exception as e:
pass
| mit | Python |
|
13c14b8c2b44d2f9b39e46d395fcde891ba6ba9f | Patch #670715: Universal Unicode Codec for POSIX iconv. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/test/test_iconv_codecs.py | Lib/test/test_iconv_codecs.py | from test import test_support
import unittest, sys
import codecs, _iconv_codec
from encodings import iconv_codec
from StringIO import StringIO
class IconvCodecTest(unittest.TestCase):
if sys.byteorder == 'big':
spam = '\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
else:
spam = 's\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
def test_sane(self):
self.encoder, self.decoder, self.reader, self.writer = \
codecs.lookup(_iconv_codec.internal_encoding)
self.assertEqual(self.decoder(self.spam), (u'spamspam', 16))
self.assertEqual(self.encoder(u'spamspam'), (self.spam, 8))
self.assertEqual(self.reader(StringIO(self.spam)).read(), u'spamspam')
f = StringIO()
self.writer(f).write(u'spamspam')
self.assertEqual(f.getvalue(), self.spam)
def test_basic_errors(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
def testencerror(errors):
return self.encoder(u'sp\ufffdam', errors)
def testdecerror(errors):
return self.decoder('sp\xffam', errors)
self.assertRaises(UnicodeEncodeError, testencerror, 'strict')
self.assertRaises(UnicodeDecodeError, testdecerror, 'strict')
self.assertEqual(testencerror('replace'), ('sp?am', 5))
self.assertEqual(testdecerror('replace'), (u'sp\ufffdam', 5))
self.assertEqual(testencerror('ignore'), ('spam', 5))
self.assertEqual(testdecerror('ignore'), (u'spam', 5))
def test_pep293_errors(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
def testencerror(errors):
return self.encoder(u'sp\ufffdam', errors)
def testdecerror(errors):
return self.decoder('sp\xffam', errors)
self.assertEqual(testencerror('xmlcharrefreplace'),
('sp�am', 5))
self.assertEqual(testencerror('backslashreplace'),
('sp\\ufffdam', 5))
def error_bomb(exc):
return (u'*'*40, len(exc.object))
def error_mock(exc):
error_mock.lastexc = exc
return (unicode(exc.object[exc.start - 1]), exc.end)
codecs.register_error('test_iconv_codec.bomb', error_bomb)
codecs.register_error('test_iconv_codec.mock', error_mock)
self.assertEqual(testencerror('test_iconv_codec.bomb'),
('sp' + ('*'*40), 5))
self.assertEqual(testdecerror('test_iconv_codec.bomb'),
(u'sp' + (u'*'*40), 5))
self.assertEqual(testencerror('test_iconv_codec.mock'), ('sppam', 5))
exc = error_mock.lastexc
self.assertEqual(exc.object, u'sp\ufffdam')
self.assertEqual(exc.start, 2)
self.assertEqual(exc.end, 3)
self.assert_(isinstance(exc, UnicodeEncodeError))
self.assertEqual(testdecerror('test_iconv_codec.mock'), (u'sppam', 5))
exc = error_mock.lastexc
self.assertEqual(exc.object, 'sp\xffam')
self.assertEqual(exc.start, 2)
self.assertEqual(exc.end, 3)
self.assert_(isinstance(exc, UnicodeDecodeError))
def test_empty_escape_decode(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
self.assertEquals(self.decoder(u""), ("", 0))
self.assertEquals(self.encoder(""), (u"", 0))
def test_main():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(IconvCodecTest))
test_support.run_suite(suite)
if __name__ == "__main__":
test_main()
# ex: ts=8 sts=4 et
| mit | Python |
|
de4e54e1de5905600d539df781994612f03e0672 | Add files via upload | OswinGuai/GenerateAdjacency | matrix.py | matrix.py | import numpy as np
def parse_to_matrix(input_file_path, div = '\t', data_type = int):
input_file = open(input_file_path, 'r')
matrix = [ map(data_type,line.strip().split('%s' % div)) for line in input_file if line.strip() != "" ]
input_file.close()
return np.array(matrix)
def parse_to_vectors(input_file_path, div = '\t', data_type = int):
input_file = open(input_file_path, 'r')
matrix = [ map(data_type,line.strip().split('%s' % div)) for line in input_file if line.strip() != "" ]
input_file.close()
return np.array(matrix)
def write_matrix_into_file(matrix, output_file_path):
output = open(output_file_path, 'w')
size = len(matrix)
for row_i in range(size):
vec = matrix[row_i]
output.write('%s' % ' '.join(str(i) for i in vec))
output.write('\n')
output.close()
# Variant that writes a head value in front of each row. It needs its own name:
# Python has no overloading, so a second `write_matrix_into_file` would
# silently shadow the function above.
def write_matrix_with_heads_into_file(matrix, heads, output_file_path):
output = open(output_file_path, 'w')
size = len(matrix)
for row_i in range(size):
vec = matrix[row_i]
head = heads[row_i]
output.write('%s' % head)
output.write(' ')
output.write('%s' % ' '.join(str(i) for i in vec))
output.write('\n')
output.close()
| apache-2.0 | Python |
|
42e485b7367e1a707a73b834f39fc6d3f356b61d | remove valid config check | mgerhardy/fips,michaKFromParis/fips,michaKFromParis/fips,floooh/fips,code-disaster/fips,floooh/fips,anthraxx/fips,floooh/fips,anthraxx/fips,code-disaster/fips,mgerhardy/fips | verbs/gdb.py | verbs/gdb.py | """implement 'gdb' verb (debugs a single target with gdb)
gdb
gdb [target]
gdb [target] [config]
"""
import subprocess
from mod import log, util, config, project, settings
#-------------------------------------------------------------------------------
def gdb(fips_dir, proj_dir, cfg_name, target=None) :
"""debug a single target with gdb"""
# prepare
proj_name = util.get_project_name_from_dir(proj_dir)
util.ensure_valid_project_dir(proj_dir)
# load the config(s)
configs = config.load(fips_dir, proj_dir, cfg_name)
if configs :
for cfg in configs :
# check if config is valid
config_valid, _ = config.check_config_valid(fips_dir, cfg, print_errors = True)
if config_valid :
deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg)
log.colored(log.YELLOW, "=== gdb: {}".format(cfg['name']))
cmdLine = ['gdb', target]
subprocess.call(args = cmdLine, cwd = deploy_dir)
else :
log.error("Config '{}' not valid in this environment".format(cfg['name']))
else :
log.error("No valid configs found for '{}'".format(cfg_name))
return True
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
"""debug a single target with gdb"""
if not util.is_valid_project_dir(proj_dir) :
log.error('must be run in a project directory')
tgt_name = None
cfg_name = None
if len(args) > 0 :
tgt_name = args[0]
if len(args) > 1:
cfg_name = args[1]
if not cfg_name :
cfg_name = settings.get(proj_dir, 'config')
if not tgt_name :
tgt_name = settings.get(proj_dir, 'target')
if not tgt_name :
log.error('no target specified')
gdb(fips_dir, proj_dir, cfg_name, tgt_name)
#-------------------------------------------------------------------------------
def help() :
"""print 'gdb' help"""
log.info(log.YELLOW +
"fips gdb\n"
"fips gdb [target]\n"
"fips gdb [target] [config]\n" + log.DEF +
" debug a single target in current or named config")
| """implement 'gdb' verb (debugs a single target with gdb)
gdb
gdb [target]
gdb [target] [config]
"""
import subprocess
from mod import log, util, config, project, settings
#-------------------------------------------------------------------------------
def gdb(fips_dir, proj_dir, cfg_name, target=None) :
"""debug a single target with gdb"""
# prepare
proj_name = util.get_project_name_from_dir(proj_dir)
util.ensure_valid_project_dir(proj_dir)
# load the config(s)
configs = config.load(fips_dir, proj_dir, cfg_name)
if configs :
for cfg in configs :
# check if config is valid
config_valid, _ = config.check_config_valid(fips_dir, cfg, print_errors = True)
if config_valid :
deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg)
log.colored(log.YELLOW, "=== gdb: {}".format(cfg['name']))
cmdLine = ['gdb', target]
subprocess.call(args = cmdLine, cwd = deploy_dir)
else :
log.error("Config '{}' not valid in this environment".format(cfg['name']))
else :
log.error("No valid configs found for '{}'".format(cfg_name))
if num_valid_configs != len(configs) :
log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs)))
return False
else :
log.colored(log.GREEN, '{} configs built'.format(num_valid_configs))
return True
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
"""debug a single target with gdb"""
if not util.is_valid_project_dir(proj_dir) :
log.error('must be run in a project directory')
tgt_name = None
cfg_name = None
if len(args) > 0 :
tgt_name = args[0]
if len(args) > 1:
cfg_name = args[1]
if not cfg_name :
cfg_name = settings.get(proj_dir, 'config')
if not tgt_name :
tgt_name = settings.get(proj_dir, 'target')
if not tgt_name :
log.error('no target specified')
gdb(fips_dir, proj_dir, cfg_name, tgt_name)
#-------------------------------------------------------------------------------
def help() :
"""print 'gdb' help"""
log.info(log.YELLOW +
"fips gdb\n"
"fips gdb [target]\n"
"fips gdb [target] [config]\n" + log.DEF +
" debug a single target in current or named config")
| mit | Python |
0118316df964c09198747255f9f3339ed736066d | Create test.py | andresitodeguzman/twtpy | test/test.py | test/test.py | # TweetPy
# Test
import unittest
import tweet
class SampleTestClass(unittest.TestCase):
    def test_sample(self):  # unittest only runs methods whose names start with "test"
        # do something
        a = 1
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
fa4b01102d1226ccc3dcf58119053bbc8839c36e | add ex42 | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | lpthw/ex42.py | lpthw/ex42.py | #!/usr/bin/env python
# Exercise 42: Is-A, Has-A, Objects, and Classes
## Animal is-a object (yes, sort of confusing) look at the extra credit
class Animal(object):
pass
## ??
class Dog(Animal):
def __init__(self, name):
## ??
self.name = name
## ??
class Cat(Animal):
def __init__(self, name):
## ??
self.name = name
## ??
class Person(object):
def __init__(self, name):
## ??
self.name = name
## Person has-a pet of some kind
self.pet = None
## ??
class Employee(Person):
def __init__(self, name, salary):
## ?? hmm what is this strange magic?
super(Employee, self).__init__(name)
## ??
self.salary = salary
## ??
class Fish(object):
pass
## ??
class Salmon(Fish):
pass
## ??
class Halibut(Fish):
pass
## rover is-a Dog
rover = Dog("Rover")
## ??
satan = Cat("Satan")
## ??
mary = Person("Mary")
## ??
mary.pet = satan
## ??
frank = Employee("Frank", 120000)
## ??
frank.pet = rover
## ??
flipper = Fish()
## ??
crouse = Salmon()
## ??
harry = Halibut()
| mit | Python |
|
94bc0d6596aba987943bf40e2289f34240081713 | Add lc0041_first_missing_positive.py | bowen0701/algorithms_data_structures | lc0041_first_missing_positive.py | lc0041_first_missing_positive.py | """Leetcode 41. First Missing Positive
Hard
URL: https://leetcode.com/problems/first-missing-positive/
Given an unsorted integer array, find the smallest missing positive integer.
Example 1:
Input: [1,2,0]
Output: 3
Example 2:
Input: [3,4,-1,1]
Output: 2
Example 3:
Input: [7,8,9,11,12]
Output: 1
Note:
Your algorithm should run in O(n) time and uses constant extra space.
"""
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # A sketch of the standard O(n)-time / O(1)-space approach (one common
        # solution, not necessarily the author's intended one): place each
        # value v with 1 <= v <= n into slot v-1 by swapping, then the first
        # slot i holding a value other than i+1 gives the answer.
        n = len(nums)
        for i in range(n):
            while 0 < nums[i] <= n and nums[nums[i] - 1] != nums[i]:
                j = nums[i] - 1
                nums[i], nums[j] = nums[j], nums[i]
        for i in range(n):
            if nums[i] != i + 1:
                return i + 1
        return n + 1
def main():
    sol = Solution()
    print(sol.firstMissingPositive([1, 2, 0]))          # expected: 3
    print(sol.firstMissingPositive([3, 4, -1, 1]))      # expected: 2
    print(sol.firstMissingPositive([7, 8, 9, 11, 12]))  # expected: 1
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
1aba0e5ba6aa91c2aa608c2c94411c59e4a3eca5 | Create stack_plot.py | psyllost/02819 | stack_plot.py | stack_plot.py | # -*- coding: utf-8 -*-
"""
Includes a function for visualization of data with a stack plot.
"""
from matplotlib import pyplot as plt
from matplotlib import ticker
import random
def stack(number_of_topics, TopicTitles, X, Y):
"""Creates a stack plot for the number of papers published from 2002 to 2014
for each topic"""
# random colors as RGB
colors = [(random.randint(0,255),random.randint(0,255),random.randint(0,255)) for i in range(number_of_topics)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(colors)):
r, g, b = colors[i]
colors[i] = (r / 255., g / 255., b / 255.)
plt.figure(num=1,figsize=(30,27))
ax1 = plt.subplot()
x_formatter = ticker.ScalarFormatter(useOffset=False)
y_formatter = ticker.ScalarFormatter(useOffset=False)
ax1.yaxis.set_major_formatter(y_formatter)
ax1.xaxis.set_major_formatter(x_formatter)
ax1.set_ylabel('Number of Papers')
ax1.set_xlabel('Year of Publication')
polys = ax1.stackplot(X, Y, colors=colors)
legendProxies = []
for poly in polys:
legendProxies.append(plt.Rectangle((0, 0), 1, 1, fc=poly.get_facecolor()[0]))
plt.legend(legendProxies,TopicTitles,prop={'size':8})
plt.tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None)
plt.show()
| apache-2.0 | Python |
|
d4ac57f3a328dd98b76f6c8924ddc9d735c32c04 | Add py-sphinxcontrib-qthelp package (#13275) | iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py | var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySphinxcontribQthelp(PythonPackage):
"""sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp
document."""
homepage = "http://sphinx-doc.org/"
url = "https://pypi.io/packages/source/s/sphinxcontrib-qthelp/sphinxcontrib-qthelp-1.0.2.tar.gz"
version('1.0.2', sha256='79465ce11ae5694ff165becda529a600c754f4bc459778778c7017374d4d406f')
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
def test(self):
# Requires sphinx, creating a circular dependency
pass
| lgpl-2.1 | Python |
|
6b5587fc7856b5d03b3605e1a31234ff98df88e2 | add L3 quiz - Expressions | udacity/course-front-end-frameworks,udacity/course-front-end-frameworks,udacity/course-front-end-frameworks | lesson3/quizExpressions/unit_tests.py | lesson3/quizExpressions/unit_tests.py | import re
is_correct = False
brace_regex = "{{.*}}"
color_regex = "(?:brick.)?color"
size_regex = "(?:brick.)?size"
price_regex = "(?:brick.)?price"
heading = widget_inputs["text1"]
brick_color = widget_inputs["text2"]
brick_size = widget_inputs["text3"]
brick_price = widget_inputs["text4"]
brick_description = widget_inputs["text5"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if heading == '':
is_correct = True
else:
commentizer("Do you think the heading should change if you use a different brick? Why would a different brick make the heading change?")
#check the brick's color matches a RegEx
if re.search( color_regex, brick_color ):
if not re.search( brace_regex, brick_color ):
is_correct = False
commentizer("What you entered into the color field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The color field is not correct.")
#check the brick's size matches a RegEx
if re.search( size_regex, brick_size ):
if not re.search( brace_regex, brick_size ):
is_correct = False
commentizer("What you entered into the size field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The size field is not correct.")
#check the brick's price matches a RegEx
if re.search( price_regex, brick_price ):
if not re.search( brace_regex, brick_price ):
is_correct = False
commentizer("What you entered into the price field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The price field is not correct.")
# if they're all unchecked
if not any([heading, brick_color, brick_size, brick_price, brick_description]):
is_correct = False
comments = []
comments.append('At least one of these should be converted into an expression.\n\nLook at the data in the template and ask yourself, "Will this change if I use a different brick?" If the answer is yes, then enter the expression into the appropriate field.')
if is_correct:
commentizer("Great job!")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| mit | Python |