commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
24d1162740aa9a9948665d97dc082a555a1ccf13 | Rename initial_args to standard argv. | ssundarraj/grip,mgoddard-pivotal/grip,mgoddard-pivotal/grip,ssundarraj/grip,joeyespo/grip,jbarreras/grip,jbarreras/grip,joeyespo/grip | grip/command.py | grip/command.py | """\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -h | --help
grip --version
Where:
<path> is a file to render or a directory containing a README.md file
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--gfm Use GitHub-Flavored Markdown, e.g. comments or issues
--context=<repo> The repository context, only taken into account with --gfm
"""
import sys
from path_and_address import resolve, split_address
from docopt import docopt
from .server import serve
from . import __version__
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
def main(argv=None):
"""The entry point of the application."""
if argv is None:
argv = sys.argv[1:]
version = 'Grip ' + __version__
# Parse options
args = docopt(usage, argv=argv, version=version)
# Parse arguments
path, address = resolve(args['<path>'], args['<address>'])
host, port = split_address(address)
# Validate address
if address and not host and not port:
print 'Error: Invalid address', repr(address)
# Run server
try:
serve(path, host, port, args['--gfm'], args['--context'])
return 0
except ValueError, ex:
print 'Error:', ex
return 1
| """\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -h | --help
grip --version
Where:
<path> is a file to render or a directory containing a README.md file
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--gfm Use GitHub-Flavored Markdown, e.g. comments or issues
--context=<repo> The repository context, only taken into account with --gfm
"""
import sys
from path_and_address import resolve, split_address
from docopt import docopt
from .server import serve
from . import __version__
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
def main(initial_args=None):
"""The entry point of the application."""
if initial_args is None:
initial_args = sys.argv[1:]
version = 'Grip ' + __version__
# Parse options
args = docopt(usage, argv=initial_args, version=version)
# Parse arguments
path, address = resolve(args['<path>'], args['<address>'])
host, port = split_address(address)
# Validate address
if address and not host and not port:
print 'Error: Invalid address', repr(address)
# Run server
try:
serve(path, host, port, args['--gfm'], args['--context'])
return 0
except ValueError, ex:
print 'Error:', ex
return 1
| mit | Python |
cf357e46b3d9664325ca69f3b7c0393c89ad44a7 | Add some function tests. | sapir/tinywhat,sapir/tinywhat,sapir/tinywhat | tests/test_func.py | tests/test_func.py | from .utils import assert_eval
def test_simple_func():
assert_eval('(def @a $a 8) (@a)', 1, 8)
def test_simple_func_args():
assert_eval(
'(def @a $a $a)'
'(@a 1)'
'(@a 2)'
'(@a 5)',
1,
1,
2,
5)
def test_func_args_overwrite_globals():
assert_eval(
'(def @a $a 3)'
'(set $a 10)'
'$a'
'(@a 8)'
'$a',
1,
10,
10,
3,
8,
)
def test_func_args_with_offset():
assert_eval(
'(def @a $d (+ $d $i))'
'(def @b $i (+ $i $j))'
'(@a 1 2 3)'
'(@b 8 9 10)'
'$a\n$b\n$c\n$d\n$e\n$i\n$j\n$k\n',
1, 1,
4,
17,
0, 0, 0, 1, 2, 8, 9, 10,
)
| bsd-3-clause | Python |
|
2628bfa261c9bb76f4d3742bbb36f1179d961c83 | add Pool and OrderedPool tests | teepark/greenhouse | tests/test_pool.py | tests/test_pool.py | import unittest
import greenhouse
import greenhouse.poller
from test_base import TESTING_TIMEOUT, StateClearingTestCase
class PoolTestCase(StateClearingTestCase):
POOL = greenhouse.Pool
def test_basic(self):
def f(x):
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
l.sort()
assert l == [x ** 2 for x in xrange(30)]
pool.close()
def test_with_blocking(self):
def f(x):
if x % 2:
greenhouse.pause()
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
l.sort()
assert l == [x ** 2 for x in xrange(30)]
pool.close()
def test_shuts_down(self):
def f(x):
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
for x in xrange(30):
pool.get()
pool.close()
for x in xrange(30):
pool.put(x)
greenhouse.pause()
assert len(pool.inq.queue) == 30, len(pool.inq.queue)
def test_as_context_manager(self):
def f(x):
return x ** 2
with self.POOL(f) as pool:
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
l.sort()
assert l == [x ** 2 for x in xrange(30)]
def test_starting_back_up(self):
def f(x):
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
for x in xrange(30):
pool.get()
pool.close()
pool.start()
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
l.sort()
assert l == [x ** 2 for x in xrange(30)]
class OrderedPoolTestCase(PoolTestCase):
POOL = greenhouse.OrderedPool
def test_ordered_basic(self):
def f(x):
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
assert l == [x ** 2 for x in xrange(30)]
pool.close()
def test_ordered_with_blocking(self):
def f(x):
if x % 2:
greenhouse.pause()
return x ** 2
pool = self.POOL(f)
pool.start()
for x in xrange(30):
pool.put(x)
l = []
for x in xrange(30):
l.append(pool.get())
assert l == [x ** 2 for x in xrange(30)]
pool.close()
| bsd-3-clause | Python |
|
ec6dff24e3049ddaab392f0bc5b8d8b724e41e20 | Print the trending Python repos on GitHub | cclauss/Ten-lines-or-less | trending_python.py | trending_python.py | #!/usr/bin/env python3
import bs4
import requests
url = 'https://github.com/trending?l=Python'
soup = bs4.BeautifulSoup(requests.get(url).content, 'lxml') # or 'html5lib'
repos = soup.find('ol', class_="repo-list").find_all('a', href=True)
repos = (r.text.strip().replace(' ', '') for r in repos if '/' in r.text)
print('\n'.join(repos))
| apache-2.0 | Python |
|
37691851b6e21a6a51140f512fd9802e964b0785 | Create beta_pythons_dynamic_classes_3.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/beta/beta_pythons_dynamic_classes_3.py | Solutions/beta/beta_pythons_dynamic_classes_3.py | def create_class(class_name, secrets = None):
if not class_name: return None
class NewClass(object):
pass
NewClass.__name__ = class_name
if not secrets: return NewClass
for i in secrets:
if 'function' in str(type(secrets[i])):
setattr(NewClass, i, classmethod(secrets[i]))
else:
setattr(NewClass, i, secrets[i])
return NewClass
| mit | Python |
|
32d9a97336c786660a838dc69cfab2ebe3436343 | update viafReconciliationPeople.py | jhu-archives-and-manuscripts/MARAC_API_Workshop | viafReconciliationPeople.py | viafReconciliationPeople.py | import requests
import csv
from fuzzywuzzy import fuzz
import json
import urllib
baseURL = 'http://viaf.org/viaf/search/viaf?query=local.personalNames+%3D+%22'
f=csv.writer(open('viafPeopleResults.csv', 'wb'))
f.writerow(['search']+['result']+['viaf']+['lc']+['isni']+['ratio']+['partialRatio']+['tokenSort']+['tokenSet']+['avg'])
with open('people.txt') as txt:
for row in txt:
print row
rowEdited = urllib.quote(row.decode('utf-8-sig').encode('utf-8').strip())
url = baseURL+rowEdited+'%22+and+local.sources+%3D+%22lc%22&sortKeys=holdingscount&maximumRecords=1&httpAccept=application/rdf+json'
response = requests.get(url).content
try:
response = response[response.index('<recordData xsi:type="ns1:stringOrXmlFragment">')+47:response.index('</recordData>')].replace('"','"')
response = json.loads(response)
label = response['mainHeadings']['data'][0]['text']
viafid = response['viafID']
except:
label = ''
viafid = ''
ratio = fuzz.ratio(row, label)
partialRatio = fuzz.partial_ratio(row, label)
tokenSort = fuzz.token_sort_ratio(row, label)
tokenSet = fuzz.token_set_ratio(row, label)
avg = (ratio+partialRatio+tokenSort+tokenSet)/4
if viafid != '':
links = json.loads(requests.get('http://viaf.org/viaf/'+viafid+'/justlinks.json').text)
viafid = 'http://viaf.org/viaf/'+viafid
try:
lc = 'http://id.loc.gov/authorities/names/'+json.dumps(links['LC'][0]).replace('"','')
except:
lc = ''
try:
isni = 'http://isni.org/isni/'+json.dumps(links['ISNI'][0]).replace('"','')
except:
isni = ''
else:
lc = ''
isni = ''
f=csv.writer(open('viafPeopleResults.csv', 'a'))
f.writerow([row.strip()]+[label]+[viafid]+[lc]+[isni]+[ratio]+[partialRatio]+[tokenSort]+[tokenSet]+[avg])
| mit | Python |
|
2934f80f294759ec202e0305025da2d7e71d3ae3 | Add plot_throughput.py. | jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools | problem/net_file_xfer_tput_174608/plot_throughput.py | problem/net_file_xfer_tput_174608/plot_throughput.py | #! /usr/bin/env python3
# Copyright 2017 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from bs4 import BeautifulSoup
import datetime
import matplotlib.pyplot as plt
import os
import re
import requests
'''Summarizes data from codereview.stackexchange.com/questions/174608/.'''
def get_cached_pastebin_text(url):
fspec = os.path.basename(url) + '.html'
if not os.path.exists(fspec):
r = requests.get(url)
assert r.ok
with open(fspec, 'w') as fout:
fout.write(r.text)
soup = BeautifulSoup(open(fspec).read(), 'html.parser')
raw = str(soup.find(id='paste_code'))
return raw.split('\n')
def hms(stamp):
'''12:00:00 -> noon.'''
h, m, s = (int(n) for n in stamp.split(':'))
today = datetime.date.today()
return datetime.datetime(
year=today.year, month=today.month, day=today.day,
hour=h, minute=m, second=s)
def get_progress(chunk_size, url='https://pastebin.com/ehncSeqD'):
chunk_re = re.compile(
r'^(\d{2}:\d{2}:\d{2}) - Chunk (\d+) of (\d+)')
detail_re = re.compile(
r'^(\d{2}:\d{2}:\d{2}) - Interconnect. (\d+) of (\d+)')
cur_chunk = -1
for line in get_cached_pastebin_text(url):
m = chunk_re.search(line)
if m:
assert cur_chunk < int(m.group(2)) # strictly monotonic
cur_chunk = int(m.group(2))
m = detail_re.search(line)
if m:
assert chunk_size >= int(m.group(3))
yield(hms(m.group(1)),
cur_chunk * chunk_size + int(m.group(2)))
def plot_tput(chunk_size=2e5, verbose=False):
prog = {} # maps elapsed time to download progress (in bytes)
start = None
for stamp, bytes in get_progress(int(chunk_size)):
if start:
elapsed = int((stamp - start).total_seconds())
# With limited resolution (1sec) timestamps, last measurement wins.
prog[elapsed] = bytes
if verbose:
print(elapsed, bytes)
else:
start = stamp
x = [p[0] for p in prog.items()]
y = [p[1] / 1024.0 for p in prog.items()] # total KBytes downloaded so far
plt.scatter(x, y)
plt.show()
if __name__ == '__main__':
os.chdir('/tmp')
plot_tput()
| mit | Python |
|
3ea69c783393b6c62f3428c6ec83a24fe7634b6c | add grader in Python | irakli-janiashvili/codewars,irakli-janiashvili/codewars,irakli-janiashvili/codewars | 8-kyu/grader.py | 8-kyu/grader.py | def grader(score):
if score < 0.6 or score > 1:
return 'F'
elif score < 0.7:
return 'D'
elif score < 0.8:
return 'C'
elif score < 0.9:
return 'B'
else:
return 'A'
| mit | Python |
|
37dda1d235017bebb9bb0f6eff150dd12222762f | remove organisation from db | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0162_remove_org.py | migrations/versions/0162_remove_org.py | """
Revision ID: 0162_remove_org
Revises: 0161_email_branding
Create Date: 2018-02-06 17:08:11.879844
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0162_remove_org'
down_revision = '0161_email_branding'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'organisation_id')
op.drop_column('services_history', 'organisation_id')
op.drop_table('organisation')
op.alter_column('service_email_branding', 'email_branding_id', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True)) # noqa
op.add_column('services', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.create_table(
'organisation',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('colour', sa.VARCHAR(length=7), autoincrement=False, nullable=True),
sa.Column('logo', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='organisation_pkey')
)
op.create_index('ix_services_history_organisation_id', 'services_history', ['organisation_id'], unique=False)
op.create_foreign_key('services_organisation_id_fkey', 'services', 'organisation', ['organisation_id'], ['id'])
op.create_index('ix_services_organisation_id', 'services', ['organisation_id'], unique=False)
op.alter_column('service_email_branding', 'email_branding_id', nullable=True)
| mit | Python |
|
d1e8a8bb6ffc852bf07c40968029c5def7dc0a96 | Correct the dict | Saviq/nova-compute-lxd,tpouyer/nova-lxd,mmasaki/nova-compute-lxd,Saviq/nova-compute-lxd,mmasaki/nova-compute-lxd,tpouyer/nova-lxd | nclxd/nova/virt/lxd/host_utils.py | nclxd/nova/virt/lxd/host_utils.py | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesytem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
used = (hddinfo.f_blocks - hddinfo.f_bfree) * hddinfo.f_frsize
available = st.f_bavail * st.f_frsize
return {'total': total,
'available': available,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
| # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesytem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
used = (hddinfo.f_blocks - hddinfo.f_bfree) * hddinfo.f_frsize
available = st.f_bavail * st.f_frsize
return {'total': total,
'available': free,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
| apache-2.0 | Python |
85060c7653a04f18e6f5cd016e113327ba3a2878 | Add support for Sercomm IP camera discovery. (#238) | balloob/netdisco | netdisco/discoverables/sercomm.py | netdisco/discoverables/sercomm.py | """
Discover Sercomm network cameras.
These are rebranded as iControl and many others, and are usually
distributed as part of an ADT or Comcast/Xfinity monitoring package.
https://github.com/edent/Sercomm-API
"""
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering camera services."""
def get_entries(self):
"""Get all Sercomm iControl devices."""
return self.find_by_device_description({'manufacturer': 'iControl'})
| mit | Python |
|
1295f2867eb7348959d86618b8e80c001cc41ff7 | Add 'lib' init module. | Pylons/akhet,hlwsmith/akhet,hlwsmith/akhet,hlwsmith/akhet,Pylons/akhet | akhet/paster_templates/akhet/+package+/lib/__init__.py | akhet/paster_templates/akhet/+package+/lib/__init__.py | """Miscellaneous support packages for {{project}}.
"""
| mit | Python |
|
8fe99eedd4e1a1604277c42ed8f2ea0dc2e622de | add simple csv utility module | berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud | mediacloud/mediawords/util/csv.py | mediacloud/mediawords/util/csv.py | """Utility functions for dealing with csvs."""
import csv
import io
def get_csv_string_from_dicts(dicts: list) -> str:
"""Given a list of dicts, return a representative csv string."""
if len(dicts) < 1:
return ''
csvio = io.StringIO()
csvwriter = csv.DictWriter(csvio, fieldnames=dicts[0].keys())
csvwriter.writeheader()
[csvwriter.writerow(d) for d in dicts]
return csvio.getvalue()
def get_dicts_from_csv_string(csvstring: str) -> list:
"""Given a csv string, return a list of dicts."""
if len(csvstring) < 1:
return []
csvio = io.StringIO(csvstring)
return list(csv.DictReader(csvio))
| agpl-3.0 | Python |
|
f6c2d5e37685b149cfd447545c58ce1fc4d836b9 | Add function to create view for Span candidate subclasses | jasontlam/snorkel,HazyResearch/snorkel,HazyResearch/snorkel,jasontlam/snorkel,jasontlam/snorkel,HazyResearch/snorkel | snorkel/models/views.py | snorkel/models/views.py |
def create_serialized_candidate_view(session, C, verbose=True):
"""Creates a view in the database for a Candidate sub-class C defined over
Span contexts, which are direct children of a single sentence.
Creates VIEW with schema:
candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*
NOTE: This limited functionality should be expanded for arbitrary context
trees. Also this should be made more dialect-independent.
"""
selects, froms, joins = [], [], []
for i, arg in enumerate(C.__argnames__):
selects.append("span{0}.*".format(i))
froms.append("span AS span{0}".format(i))
joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))
sql = """
CREATE VIEW {0}_serialized AS
SELECT
candidate.id,
candidate.split,
{1},
sentence.*
FROM
candidate,
{0},
{2},
sentence
WHERE
candidate.id = {0}.id
AND sentence.id = span0.sentence_id
AND {3}
""".format(
C.__tablename__,
", ".join(selects),
", ".join(froms),
" AND ".join(joins)
)
if verbose:
print("Creating view...")
print(sql)
session.execute(sql) | apache-2.0 | Python |
|
135324dd3346f7830abbe64cb5eadf82d1ca963c | add - module for generating data sets. | rfaulkner/easyML,rfaulkner/easyML,rfaulkner/easyML,rfaulkner/easyML | versus/src/data.py | versus/src/data.py | """
Module for loading datasets
"""
import gzip
import theano.tensor as T
import theano
import numpy
import cPickle
import os
def load_MNIST(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
#train_set, valid_set, test_set format: tuple(input, target)
#input is an numpy.ndarray of 2 dimensions (a matrix)
#witch row's correspond to an example. target is a
#numpy.ndarray of 1 dimensions (vector)) that have the same length as
#the number of rows in the input. It should give the target
#target to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval | bsd-3-clause | Python |
|
9732c401fb51ae0b757be5108835b71e7c389850 | Add tests | danirus/django-comments-xtd,danirus/django-comments-xtd,danirus/django-comments-xtd,danirus/django-comments-xtd | django_comments_xtd/tests/test_get_version.py | django_comments_xtd/tests/test_get_version.py | try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.test import TestCase
class GetVersionTestCase(TestCase):
@patch('django_comments_xtd.VERSION', (2, 8, 0, 'f', 0))
def test_get_version_when_patch_equal_to_zero(self):
from django_comments_xtd import get_version
self.assertEqual(get_version(), '2.8.0')
@patch('django_comments_xtd.VERSION', (2, 8, 1, 'f', 0))
def test_get_version_when_patch_greater_than_zero(self):
from django_comments_xtd import get_version
self.assertEqual(get_version(), '2.8.1')
| bsd-2-clause | Python |
|
2eb163c5dd675c2e7a9cedb5d6868545833cbf34 | Add lemma rules | oroszgy/spaCy.hu,raphael0202/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,honnibal/spaCy,banglakit/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,raphael0202/spaCy,banglakit/spaCy,aikramer2/spaCy,banglakit/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,raphael0202/spaCy,aikramer2/spaCy,recognai/spaCy,banglakit/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,banglakit/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,banglakit/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,spacy-io/spaCy,recognai/spaCy,raphael0202/spaCy,explosion/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,aikramer2/spaCy | spacy/en/lemma_rules.py | spacy/en/lemma_rules.py | # encoding: utf8
from __future__ import unicode_literals
LEMMA_RULES = {
"noun": [
["s", ""],
["ses", "s"],
["ves", "f"],
["xes", "x"],
["zes", "z"],
["ches", "ch"],
["shes", "sh"],
["men", "man"],
["ies", "y"]
],
"verb": [
["s", ""],
["ies", "y"],
["es", "e"],
["es", ""],
["ed", "e"],
["ed", ""],
["ing", "e"],
["ing", ""]
],
"adj": [
["er", ""],
["est", ""],
["er", "e"],
["est", "e"]
],
"punct": [
["“", "\""],
["”", "\""],
["\u2018", "'"],
["\u2019", "'"]
]
}
| mit | Python |
|
45628f2abd6ec66ad48679732d600174a3a7de26 | add a script | odf/gavrog,odf/gavrog,odf/gavrog,odf/gavrog,odf/gavrog | jython/surfaceMapToDs.py | jython/surfaceMapToDs.py | #!/bin/env jython
import sys
import java.io
import org.gavrog
def dsymbolFromCyclicAdjacencies(adjs):
vertexToChamber = {}
edgeToChamber = {}
chamberToVertex = {}
size = 0
for v in adjs:
vertexToChamber[v] = size
for w in adjs[v]:
if w == v:
raise RuntimeException("found a loop at vertex %s" % v)
else:
edgeToChamber[v, w] = size
chamberToVertex[size] = v
chamberToVertex[size + 1] = v
size += 2
ds = org.gavrog.joss.dsyms.basic.DynamicDSymbol(2)
elms = ds.grow(size)
for v, w in edgeToChamber:
D = edgeToChamber[v, w]
E = edgeToChamber[w, v]
if E is None:
print ("# WARNING: missing %s in adjacencies for %s" % (v, w))
ds.redefineOp(0, elms[D], elms[E + 1])
for v in adjs:
d = 2 * len(adjs[v])
D = vertexToChamber[v]
for i in range(1, d, 2):
ds.redefineOp(1, elms[D + i], elms[D + (i + 1) % d])
for D in range(0, size, 2):
ds.redefineOp(2, elms[D], elms[D + 1])
for D in range(size):
ds.redefineV(0, 1, elms[D], 1)
ds.redefineV(1, 2, elms[D], 1)
return org.gavrog.joss.dsyms.basic.DSymbol(ds), chamberToVertex
if __name__ == '__main__':
import re
text = sys.stdin.read()
data = [ [ int(s) for s in re.split(r' +', line.strip()) ]
for line in re.split(r'\n+', text.strip()) ]
adjs = dict((a[0], a[1:]) for a in data)
ds, _ = dsymbolFromCyclicAdjacencies(adjs)
print ds
| apache-2.0 | Python |
|
4ca336ee7b29609e5cc87dccf1a66c233038aa94 | Create cpp_header_merger.py | ZhreShold/PyGists,ZhreShold/PyGists | cpp_header_merger.py | cpp_header_merger.py | __author__ = 'Joshua Zhang'
"""A C/C++ header merging tool """
import os
import re
import argparse
# matching c/c++ #include patterns
pattern_include = r"#.*include.+(\.hpp|\.h)+"
pattern_squote = r"<.+>"
pattern_quote = r'".+"'
pattern_pragma = r"#pragma.+once"
regex_include = re.compile(pattern_include, re.IGNORECASE)
regex_squote = re.compile(pattern_squote, re.IGNORECASE)
regex_quote = re.compile(pattern_quote, re.IGNORECASE)
regex_pragma = re.compile(pattern_pragma, re.IGNORECASE)
# blacklist
black_list = set()
def custom_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--include', help='Include path for headers', required=True)
parser.add_argument('-o', '--output', help='Output file path', required=False)
parser.add_argument('-e', '--entry', help='Entry header file to start with', required=True)
return parser
def nonblank_lines(f):
for l in f:
line = l.rstrip()
if line:
yield line
def remove_comments(string):
pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
# first group captures quoted strings (double or single)
# second group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
def _replacer(match):
# if the 2nd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(2) is not None:
return "" # so we will return empty to remove the comment
else: # otherwise, we will return the 1st group
return match.group(1) # captured quoted-string
return regex.sub(_replacer, string)
def replace_nonsystem_header(line, file):
if re.search(regex_include, line) is not None:
if re.search(regex_squote, line) is not None:
target = line.split('<')[-1].split('>')[0]
if target in black_list:
target = 'blacklist'
else:
target = os.path.abspath(include_path + target)
elif re.search(regex_quote, line) is not None:
target = line.split('"')[1]
target = os.path.dirname(os.path.abspath(file)) + '/' + target
else:
raise Exception("Invalid #include header")
target = os.path.abspath(target)
if target not in history:
history.add(target)
return "/*" + line + "*/" + os.linesep + process_header(target)
else:
return "/*" + line + " skipped */"
return line
def process_header(file):
print("Processing: " + file)
try:
with open(file, "rb") as fnow:
this_buffer = []
require_guard = None
# remove c/c++ comments
lines_wo_comments = remove_comments(fnow.read())
for line in nonblank_lines(lines_wo_comments.splitlines()):
new_line = replace_nonsystem_header(line, file)
if re.search(regex_pragma, new_line) is not None:
new_line = ""
require_guard = 1
tmp = file.lstrip(os.path.abspath(include_path)).upper().replace('/', '_').replace('.', '_')
this_guard_name = "_AUTOMATIC_GUARD_" + tmp + "_"
this_buffer.append("#ifndef " + this_guard_name + os.linesep + '#define ' + this_guard_name)
this_buffer.append(new_line)
if require_guard == 1:
this_buffer.append("#endif /* END " + this_guard_name + " */")
this_string = os.linesep.join(this_buffer)
# print(this_string)
return this_string
except IOError:
skipped_list.add(file.lstrip(os.path.abspath(include_path)))
return ''
def merge_header(entry, output):
with open(output, "wb") as fout:
# open output for write
result = process_header(entry)
fout.write(result)
print("Done.")
if __name__ == '__main__':
parser = custom_parser()
args = vars(parser.parse_args())
entry_file = args['entry']
include_path = args['include']
output_file = args['output'] if args['output'] is not None else entry_file + "_out.hpp"
history = set(['blacklist'])
skipped_list = set()
merge_header(entry_file, output_file)
# print skipped files
print("\nThe following files are skipped, should be system headers, otherwise there must have mistakes.")
for skipped in skipped_list:
print("***Unable to open file: " + skipped + ", skipped")
| mit | Python |
|
e212ad90a8fedb8e29abe3683b99a28d4030b544 | Add process module for Popen compat handling | scorphus/passpie,eiginn/passpie,scorphus/passpie,eiginn/passpie,marcwebbie/passpie,marcwebbie/passpie | passpie/process.py | passpie/process.py | from subprocess import Popen, PIPE
from ._compat import *
class Proc(Popen):
    """Popen subclass that utf-8 encodes text input and behaves as a
    context manager even where Popen itself does not (Python 2).

    NOTE(review): `basestring` is presumably supplied by `._compat` on
    Python 3 -- confirm.
    """

    def communicate(self, **kwargs):
        # Child processes expect bytes on stdin, so encode text input.
        if kwargs.get('input') and isinstance(kwargs['input'], basestring):
            kwargs['input'] = kwargs['input'].encode('utf-8')
        return super(Proc, self).communicate(**kwargs)

    def __exit__(self, *args, **kwargs):
        # Delegate only when the base class implements the protocol
        # (Python 3); otherwise do nothing.
        if hasattr(super(Proc, self), '__exit__'):
            super(Proc, self).__exit__(*args, **kwargs)

    def __enter__(self, *args, **kwargs):
        if hasattr(super(Proc, self), '__enter__'):
            return super(Proc, self).__enter__(*args, **kwargs)
        return self
def call(*args, **kwargs):
    """Run a command through Proc and return its (stdout, stderr) pair.

    Defaults to fully piped, non-shell execution; an optional ``input``
    keyword is fed to the child's stdin.
    """
    for option, default in (('stdout', PIPE), ('stderr', PIPE),
                            ('stdin', PIPE), ('shell', False)):
        kwargs.setdefault(option, default)
    stdin_data = kwargs.pop('input', None)
    with Proc(*args, **kwargs) as proc:
        output, error = proc.communicate(input=stdin_data)
    # Decode captured stdout back to text for callers.
    if isinstance(output, basestring):
        output = output.decode('utf-8')
    return output, error
| mit | Python |
|
53038aea2b439acdc265f81b9f031336ea1f27f3 | Add lc480_sliding_window_median.py | bowen0701/algorithms_data_structures | lc480_sliding_window_median.py | lc480_sliding_window_median.py | """Leetcode 480. Sliding Window Median
URL: https://leetcode.com/problems/sliding-window-median/
Hard
Median is the middle value in an ordered integer list.
If the size of the list is even, there is no middle value.
So the median is the mean of the two middle value.
Examples:
[2,3,4] , the median is 3
[2,3], the median is (2 + 3) / 2 = 2.5
Given an array nums, there is a sliding window of size k which is moving from
the very left of the array to the very right.
You can only see the k numbers in the window.
Each time the sliding window moves right by one position.
Your job is to output the median array for each window in the original array.
For example,
Given nums = [1,3,-1,-3,5,3,6,7], and k = 3.
Window position Median
--------------- -----
[1 3 -1] -3 5 3 6 7 1
1 [3 -1 -3] 5 3 6 7 -1
1 3 [-1 -3 5] 3 6 7 -1
1 3 -1 [-3 5 3] 6 7 3
1 3 -1 -3 [5 3 6] 7 5
1 3 -1 -3 5 [3 6 7] 6
Therefore, return the median sliding window as [1,-1,-1,3,5,6].
Note:
You may assume k is always valid, ie:
k is always smaller than input array's size for non-empty array.
"""
class Solution(object):
    def medianSlidingWindow(self, nums, k):
        """Return the median of every length-k window of nums.

        Keeps the current window as a sorted list and slides it using
        bisect-based binary search; each step is O(k) (list insert and
        delete), O(n*k) overall.

        :type nums: List[int]
        :type k: int
        :rtype: List[float]
        """
        import bisect

        if not nums or k <= 0 or k > len(nums):
            return []

        def median(window):
            # Middle element for odd k, mean of the two middles for even k.
            mid = k // 2
            if k % 2:
                return float(window[mid])
            return (window[mid - 1] + window[mid]) / 2.0

        window = sorted(nums[:k])
        medians = [median(window)]
        for i in range(k, len(nums)):
            # Drop the element leaving the window, insert the incoming one.
            del window[bisect.bisect_left(window, nums[i - k])]
            bisect.insort(window, nums[i])
            medians.append(median(window))
        return medians
def main():
    # Placeholder entry point; no driver/demo implemented yet.
    pass


if __name__ == '__main__':
    main()
| bsd-2-clause | Python |
|
9a6bf30ecfa7b843d8588a8a7b052f87089e44c7 | convert csv to excel | BlackBox-CSP/nmap-tor-scanner,BlackBox-CSP/nmap-tor-scanner | write_excel.py | write_excel.py |
def Excel2CSV(ExcelFile, SheetName, CSVFile):
workbook = xlrd.open_workbook(ExcelFile)
try:
worksheet = workbook.sheet_by_name(SheetName)
except xlrd.biffh.XLRDError:
print "Missing portmap for switch " + str(SheetName)
print "Exiting program. Check spelling of Sheet name"
quit()
csvfile = open(CSVFile, 'wb')
wr = csv.writer(csvfile, quotechar="'", quoting=csv.QUOTE_ALL)
for rownum in xrange(worksheet.nrows):
wr.writerow(
list(x.encode('utf-8') if type(x) == type(u'') else x
for x in worksheet.row_values(rownum)))
csvfile.close() | mit | Python |
|
a5ec49a658de23263802c7ddad02a4e34073a2a4 | add example of a go block returning value through a channel | ubolonton/twisted-csp | example/go_block.py | example/go_block.py | import csp
def lazy_echo(x):
    # Coroutine for csp.go(): wait half a second, then terminate the go
    # block with x as its result value.
    yield csp.wait(0.5)
    print "I'm done"
    yield csp.stop(x)

def main():
    # csp.go() returns a channel carrying the coroutine's stop() value;
    # take() blocks until the value is available.
    chan = csp.go(lazy_echo(1))
    print (yield csp.take(chan))

    # Here the go block finishes during the 2-second wait, so take()
    # returns immediately.
    chan = csp.go(lazy_echo(2))
    yield csp.wait(2)
    print (yield csp.take(chan))
| epl-1.0 | Python |
|
5007a2910f54c339c50667993c11fd4586412524 | add letter code | Mechazawa/WordOn-HD-Bot | wordonhd/Letter.py | wordonhd/Letter.py | class Letter(object):
_values = {
'ENIOA': 1,
'SDTR': 2,
'MLKPBG': 3,
'ZVUFJH': 4,
'CW': 5,
'XY': 8,
'Q': 10
}
    def __init__(self, letter):
        # The input may be prefixed with '!' to mark a "wordon" bonus
        # tile; the playable letter itself is always the last character.
        self.letter = letter[-1]
        self.wordon = letter[0] == '!'
@property
def value(self):
return list(filter(lambda x: self.letter in x[0], self._values.items()))[0][1] | bsd-2-clause | Python |
|
3ef7175814cd76621eeee00a26cff786ea032727 | Add flood it example | lawsie/guizero,lawsie/guizero,lawsie/guizero | examples/floodit.py | examples/floodit.py | from guizero import App, Waffle, Text, PushButton, info
import random
# Set up the game - colours, width and height of board and no of moves allowed
colours = ["red", "blue", "green", "yellow", "fuchsia", "purple"]
b_width = 14
b_height = 14
moves_limit = 25
# Set up the palette
def init_palette():
    # Paint one palette dot per colour, in list order.
    [palette.set_pixel(colours.index(c), 0, c) for c in colours]
# Fill the board with coloured regions
def fill_board():
    # Randomise every cell of the board.
    [board.set_pixel(x, y, random.choice(colours)) for y in range(b_height) for x in range(b_width)]
# Find and flood any squares next to this
def begin_flood(x, y):
    # Palette click handler: flood from the top-left corner with the
    # clicked colour, then check win/lose state.
    replacement = palette.get_pixel(x,y)
    target = board.get_pixel(0,0)
    flood(0, 0, target, replacement)
    win_check()
# Recursively floods adjacent squares
def flood(x, y, target, replacement):
    # Algorithm from https://en.wikipedia.org/wiki/Flood_fill
    # Recolours the connected region of `target`-coloured cells that
    # contains (x, y) to `replacement`.  Returns False on the two
    # early-out conditions (no-op colour, cell not part of the region).
    if target == replacement:
        return False
    if board.get_pixel(x, y) != target:
        return False
    board.set_pixel(x, y, replacement)
    if y+1 <= b_height-1: # South
        flood(x, y+1, target, replacement)
    if y-1 >= 0: # North
        flood(x, y-1, target, replacement)
    if x+1 <= b_width-1: # East
        flood(x+1, y, target, replacement)
    if x-1 >= 0: # West
        flood(x-1, y, target, replacement)
# Check if there are any moves left or if they won
def win_check():
    # Decrement the remaining-move counter, then either declare a win
    # (whole board one colour) or end the game when moves run out.
    moves_left = int(moves_text.value)-1
    moves_text.value = moves_left # Update moves left
    if moves_left > 0:
        squares = board.get_all()
        if all(colour == squares[0] for colour in squares):
            win_text.value = "Winner!"
            reset.visible = True
            palette.disable()
    else:
        win_text.value = "No more moves left!"
        reset.visible = True
        palette.disable()
# Reset the board and remove the win text/reset button
def reset_board():
    # Start a new game: hide end-of-game widgets, restore the move
    # budget, regenerate the board and re-enable input.
    reset.visible = False
    win_text.value = ""
    moves_text.value = moves_limit
    init_palette()
    fill_board()
    palette.enable()
# Set up the game board
app = App("Flood it")
# BUG FIX: the board height was built from b_width (harmless only while
# the two constants happen to be equal); use b_height.
board = Waffle(app, width=b_width, height=b_height, pad=0)
palette = Waffle(app, width=len(colours), height=1, command=begin_flood, dotty=True)
moves_left = Text(app, text="Moves left:")
moves_text = Text(app, text=moves_limit)

# Win text and reset button (initially invisible)
win_text = Text(app)
reset = PushButton(app, text="Start again", command=reset_board)
reset.visible = False

# Initialise the palette and the random board pattern
init_palette()
fill_board()

# Instructions
instructions = PushButton(app, command=info, args=["Instructions", "Click a dot to flood the grid with that colour, beginning from the top left square. You have 25 moves to flood all squares on the grid with the same colour."], text="Instructions")

app.display()
| bsd-3-clause | Python |
|
2e44b753a071aeba95b51bd03c5635a1eb4d7f28 | Create gcd.py | Pouf/CodingCompetition,Pouf/CodingCompetition | CiO/gcd.py | CiO/gcd.py | from fractions import gcd
def greatest_common_divisor(*args):
    """Fold gcd over all arguments and return the overall GCD.

    NOTE(review): the file imports gcd from ``fractions``, which was
    removed in Python 3.9 -- ``math.gcd`` is the modern spelling.
    """
    divisor, *remaining = args
    for value in remaining:
        divisor = gcd(divisor, value)
    return divisor
| mit | Python |
|
239488d33f94b0262e642fbf751878894fb7510e | add test for post form admin in articles | jeanmask/opps,opps/opps,opps/opps,williamroot/opps,williamroot/opps,jeanmask/opps,opps/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,opps/opps,williamroot/opps,YACOWS/opps | opps/articles/tests/test_forms.py | opps/articles/tests/test_forms.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from opps.channels.models import Channel
from opps.core.widgets import OppsEditor
from ..models import Post
from ..forms import PostAdminForm
class PostFormTest(TestCase):
    """Admin-form tests for opps articles' Post model (Django)."""

    def setUp(self):
        # Minimal fixture: one user, the default example.com site and a
        # home channel every Post needs.
        User = get_user_model()
        self.user = User.objects.create(username=u'test', password='test')
        self.site = Site.objects.filter(name=u'example.com').get()
        self.channel = Channel.objects.create(name=u'Home', slug=u'home',
                                              description=u'home page',
                                              site=self.site, user=self.user)

    def test_init(self):
        """
        Test successful init without data
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertTrue(isinstance(form.instance, Post))
        self.assertEqual(form.instance.pk, self.post.pk)

    def test_default_multiupload_link(self):
        """
        Test default value field multiupload link
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertEqual(form.multiupload_link, '/fileupload/image/')

    def test_editor_widgets(self):
        """
        Test auto set field widget Editor
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertTrue(isinstance(form.fields['content'].widget,
                                   OppsEditor))
| mit | Python |
|
94dbda64d07838a7408b94251972d81897536380 | Add listener example file | mysticuno/MEETY12015MiniProject | listeners_example.py | listeners_example.py | import turtle
turtle.penup()
turtle.ht()

# One handler per arrow key; each just reports the key press.
def up():
    print("You pressed Up!")

def down():
    print("You pressed Down!")

def left():
    print("You pressed Left!")

def right():
    print("You pressed Right!")

# Bind the handlers to their keys.
turtle.onkey(up, 'Up')
turtle.onkey(down, 'Down')
turtle.onkey(left, 'Left')
turtle.onkey(right, 'Right')

# NOTE(review): repeat() re-arms itself every 500 ms but is never
# started here -- presumably a template hook for students.
def repeat():
    turtle.ontimer(repeat, 500)

turtle.listen() # Remember to put this after your listeners!
| mit | Python |
|
1d0aff329c5adb836e7b055c042990de219debe0 | Add rough first implementation of widgets.py | Khan/wtforms | wtforms/widgets.py | wtforms/widgets.py | """
wtforms.widgets
~~~~~~~~~~~~~~~
The WTForms widget system.
:copyright: 2009 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from cgi import escape
__all__ = (
'ListWidget', 'TextInput', 'PasswordInput', 'HiddenInput', 'CheckboxInput',
'RadioInput', 'Textarea', 'Select'
)
def html_params(**kwargs):
    """
    Generate HTML parameters for keywords.

    Keys are emitted sorted; the trailing underscore of reserved-word
    escapes such as ``class_`` / ``class__`` is dropped.
    """
    params = []
    keys = kwargs.keys()
    keys.sort()
    for k in keys:
        # BUG FIX: fetch the value *before* rewriting 'class_' -> 'class';
        # looking kwargs up with the rewritten key raised KeyError.
        v = escape(unicode(kwargs[k]), quote=True)
        if k in ('class_', 'class__'):
            k = k[:-1]
        k = unicode(k)
        params.append(u'%s="%s"' % (k, v))
    return str.join(' ', params)
class Widget(object):
    """
    Base class for all WTForms widgets.
    """
    def render(self, field, **kwargs):
        """
        Renders the widget. All widgets must implement this.

        `field`
            The field to render.
        `**kwargs`
            Any parameters used for rendering. Typically used to override or
            pass extra html attributes.

        Raises NotImplementedError unless overridden by a subclass.
        """
        raise NotImplementedError()
class ListWidget(Widget):
    """Render a field's subfields as an ``<ul>`` or ``<ol>`` list.

    `prefix_label` controls whether the label precedes (``label: input``)
    or follows (``input label``, e.g. for checkboxes) each subfield.
    """
    def __init__(self, parent_tag='ul', prefix_label=True):
        assert parent_tag in ('ol', 'ul')
        self.parent_tag = parent_tag
        self.prefix_label = prefix_label

    def render(self, field, **kwargs):
        html = [u'<%s %s>' % (self.parent_tag, html_params(**kwargs))]
        for subfield in field:
            if self.prefix_label:
                html.append(u'<li>%s: %s</li>' % (subfield.label, subfield()))
            else:
                # BUG FIX: this branch appended to an undefined name
                # `out` (NameError); append to the html buffer.
                html.append(u'<li>%s%s</li>' % (subfield(), subfield.label))
        html.append(u'</%s>' % self.parent_tag)
        return ''.join(html)
class Input(Widget):
    """Base for ``<input>``-type widgets (not yet implemented)."""
    pass

class TextInput(Input):
    """``<input type="text">`` widget stub."""
    pass

class PasswordInput(Input):
    """``<input type="password">`` widget stub."""
    pass

class HiddenInput(Input):
    """``<input type="hidden">`` widget stub."""
    pass

class CheckboxInput(Input):
    """``<input type="checkbox">`` widget stub."""
    pass

class RadioInput(Input):
    """``<input type="radio">`` widget stub."""
    pass

class Textarea(Widget):
    """``<textarea>`` widget stub."""
    pass

class Select(Widget):
    """``<select>`` widget stub."""
    pass
| bsd-3-clause | Python |
|
8062aa6dcb20e3b1294ce62d6d0cce1841fd21e1 | Add cartan_wvecs.py | hershsingh/thesis-iitm-code | cartan_wvecs.py | cartan_wvecs.py | # Author: Hersh Singh [hershdeep@gmail.com]
# Date: August 05, 2013
# Description:
# Given the cartan matrix and the dynkin coefficients of the highest weight, return all the weight vectors, their weights
# Todo: dimensionality of each weight space using freudenthal's formula
# Reference: Cahn Chapter 10
from scipy import *
# Cartan Matrix for the given rep
C = array([[2., -1.], [-1., 2.]]) #SU(3)
#C = array([[2., -1., 0.], [-1., 2., -2.], [0., -1., 2.]]) #B3
N = len(C)
# Dynkin Coeffs for the hightest weight
d_highest = array([1, 0])
#d_highest = array([1, 1]) #SU(3) Adjoint rep
#d_highest = array([0, 0, 1]) #B3
#m[j] = 2<v,alpha[j]>/<alpha[j],alpha[j]>
#M[k] = list of roots at level k
M = [[d_highest]]
Mcoord = [[zeros(N)]]
def get_p(Mcoord, k, i):
    """For weight i at level k, count for each simple root how many times
    it can be subtracted while staying inside the weight system (the
    p-values of the master formula m = p + v, Cahn ch. 10)."""
    #print "\nin get_p"
    p = zeros(N)
    if k==0:
        return p
    Mc = Mcoord[k][i]
    #print Mc
    # for each dynkin coefficient of the current weight vector
    for n in range(N):
        #print "n=",n
        #for each level above the current level
        #print k-1
        for kk in range(k-1, -1, -1):
            # Walk up the root string: Mc plus (k-kk) copies of root n.
            element = Mc + (k-kk)*identity(N)[n]
            #print 'looking for', element, 'in',Mcoord[kk]
            #print matchinlist(Mcoord[kk],element)
            if matchinlist(Mcoord[kk], element):
                p[n]=p[n]+1
            else:
                break
    return p
def matchinlist(list, element):
    """True if *element* equals (element-wise) any array in *list*."""
    for candidate in list:
        if array_equal(candidate, element):
            return True
    return False
# at level k
# Descend level by level from the highest weight: each level's vectors
# are obtained by subtracting simple roots (rows of the Cartan matrix C)
# wherever m = p + v has a positive entry; stop when a vector admits no
# further subtraction.
k = 0
done_flag = 0
while done_flag == 0:
    print ""
    print "Level:", k
    print "Last row of weight vectors:", M[k]
    print "Last row of weight vectors coords:", Mcoord[k]
    M.append([])
    Mcoord.append([])
    for i, v in enumerate(M[k]):
        print "Weight vector: ",i,v
        p = get_p(Mcoord,k,i)
        m = p+v
        print "M,P,V: ", m,p,v
        # NOTE(review): this breaks out as soon as ONE vector at the
        # level has no positive m-entry, before siblings are processed --
        # confirm this is the intended termination condition.
        if (sum(m>0) == 0):
            done_flag = 1
            break
        v_repeat = tile(v, [sum(m > 0), 1])
        Mcoord_repeat = tile(Mcoord[k][i], [sum(m > 0), 1])
        new_wvecs = v_repeat - C[m > 0]
        # using the fact the True,False is typecasted to 1,0 before doing arithmetic with integers
        new_Mcoord = Mcoord_repeat - identity(N)[m > 0]
        # Clean up by removing duplicates
        #print new_wvecs
        for idx,wvec in enumerate(new_wvecs):
            if not matchinlist(M[k+1],wvec):
                M[k+1].append(wvec)
                Mcoord[k+1].append(new_Mcoord[idx])
    k=k+1
| mit | Python |
|
be81dbc33e932e870a66ad0663c23e5d05b01ffa | Create Counter.py | ambonilla/Python-Cocos-Counter | Counter.py | Counter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@_ambonilla 2014
Using cocos & piglet libraries, is a small counter program
where when you push the up key it will add a number to the
displayed value, and the down key will substract one
"""
import cocos
import sys
from cocos.actions import *
import pyglet
from pyglet.window import key
class TempBackground(cocos.layer.Layer):
    """Layer showing a counter driven by the Up/Down arrow keys."""

    # Opt in to keyboard events from the cocos director.
    is_event_handler = True

    def on_key_press(self, symbol, modifiers):
        if symbol == key.UP:
            self.counter = self.counter + 1
        elif symbol == key.DOWN:
            self.counter = self.counter - 1
        elif symbol == key.ESCAPE:
            # BUG FIX: the original built `SystemExit()` and discarded it,
            # so ESC did nothing; raise it to actually exit.
            raise SystemExit()
        self.update_text()

    def update_text(self):
        # Refresh the on-screen label from the current counter value.
        self.label.element.text = str(self.counter)

    def __init__(self):
        self.startBackground = super(TempBackground, self).__init__()
        self.counter = 0
        self.label = cocos.text.Label(str(self.counter),
                                      font_name='Arial',
                                      font_size=150,
                                      anchor_x='center',
                                      anchor_y='center')
        self.label.position = 320,240
        self.update_text()
        self.add(self.label)
if __name__ == "__main__":
    # Boot the cocos director with a fixed-size window and run a scene
    # containing only the counter layer.
    cocos.director.director.init(resizable=False, fullscreen=False)
    temp_layer = TempBackground()
    main_scene = cocos.scene.Scene(temp_layer)
    cocos.director.director.run(main_scene)
| mit | Python |
|
c80baf708c956a9814ef81213a66da8d443de12a | add migration | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | apps/bplan/migrations/0002_auto_20170509_1358.py | apps/bplan/migrations/0002_auto_20170509_1358.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds verbose names (and
    blank=True on email) to the bplan Statement fields."""

    dependencies = [
        ('meinberlin_bplan', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='statement',
            name='email',
            field=models.EmailField(verbose_name='Email address', max_length=254, blank=True),
        ),
        migrations.AlterField(
            model_name='statement',
            name='name',
            field=models.CharField(verbose_name='Your Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='statement',
            name='postal_code_city',
            field=models.CharField(verbose_name='Postal code, City', max_length=255),
        ),
        migrations.AlterField(
            model_name='statement',
            name='statement',
            field=models.TextField(verbose_name='Statement', max_length=17500),
        ),
        migrations.AlterField(
            model_name='statement',
            name='street_number',
            field=models.CharField(verbose_name='Street, House number', max_length=255),
        ),
    ]
| agpl-3.0 | Python |
|
2c78290cc569eb70b5b7098d154da3fb7a2247a9 | Add db_mktag.py, command line tag creator. | drougge/wellpapp-pyclient | db_mktag.py | db_mktag.py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from sys import argv, exit
from dbclient import dbclient
# Expect one or two user arguments: the tag name and an optional type.
if len(argv) not in (2, 3):
    print "Usage:", argv[0], "tagname [tagtype]"
    exit(1)

# Forward the arguments straight to the wellpapp database client.
client = dbclient()
client.add_tag(*argv[1:])
| mit | Python |
|
3609c5842b33ca4146ad14b74c76f8954545aaa8 | Add commands for cases and variants | moonso/loqusdb | loqusdb/commands/view.py | loqusdb/commands/view.py | # -*- coding: utf-8 -*-
import logging
import click
from . import base_command
logger = logging.getLogger(__name__)
@base_command.command()
@click.option('-c', '--case-id',
              help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']

    if case_id:
        # Single-case lookup by id.
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
        return

    # No id given: dump every case, remembering whether any were seen.
    found_any = False
    for case in adapter.cases():
        found_any = True
        click.echo(case)
    if not found_any:
        logger.info("No cases found in database")
@base_command.command()
@click.option('--variant-id',
              help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']

    if variant_id:
        # Single-variant lookup by its _id key.
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
        return

    # No id given: dump every variant, remembering whether any were seen.
    found_any = False
    for variant in adapter.get_variants():
        found_any = True
        click.echo(variant)
    if not found_any:
        logger.info("No variants found in database")
| mit | Python |
|
dd2f332dd1b7a215d5a6aa81819e3d66d46c1b91 | add python solution for 20 | lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler | 01-50/20/20.py | 01-50/20/20.py | import math
# Project Euler 20: sum of the decimal digits of 100!.
# print() call form works on both Python 2 and 3 (the original py2-only
# print statement was a SyntaxError on py3); rstrip('L') tolerates a
# Python 2 long suffix.
print(sum(int(c) for c in str(math.factorial(100)).rstrip('L')))
| mit | Python |
|
7f661e24388e82ae2e2872ab11ee6a84d487aac7 | Create py-mysql-select.py | ganmk/python-prctice | py-mysql-select.py | py-mysql-select.py | #!/usr/bin/env python
# --*-- coding:utf-8 --*--
import MySQLdb #操作mysql,需要加载MySQLdb模块
#创建连接
conn = MySQLdb.connect(host = '127.0.0.1',user = 'root',passwd = '123',db = 'mydb') #使用connect方法对数据库进行连接,相当于一个门
cur = conn.cursor() #使用conn.cursor方法,相当于操作的一双手
#操作数据库
reCount = cur.execute('select * from students') #可以看到主函数的操作是查看students表
table = cur.fetchall() #将操作所得到的数据全部拿出来 #
#关闭连接
cur.close() #结束操作后,将手拿回来
conn.close() #将门关上
print reCount #cur.execute返回的是操作影响的行数
print data
| mit | Python |
|
48b2b234377d8e66ccb274e4845a835486228166 | Create test_utils.py | jottenlips/aima-python,phaller0513/aima-python,JoeLaMartina/AlphametricProject,Fruit-Snacks/aima-python,chandlercr/aima-python,abbeymiles/aima-python,JoeLaMartina/aima-python,jo-tez/aima-python,MircoT/aima-python,sofmonk/aima-python,WhittKinley/aima-python,JoeLaMartina/AlphametricProject,JamesDickenson/aima-python,phaller0513/aima-python,AmberJBlue/aima-python,chandlercr/aima-python,willhess/aima-python,grantvk/aima-python,WhittKinley/ConnectProject,AWPorter/aima-python,jottenlips/aima-python,AWPorter/aima-python,JamesDickenson/aima-python,Fruit-Snacks/aima-python,zayneanderson/aima-python,SeanCameronConklin/aima-python,zayneanderson/aima-python,WhittKinley/ConnectProject,austinban/aima-python,gokul-uf/aima-python,WmHHooper/aima-python,WhittKinley/aima-python,JoeLaMartina/AlphametricProject,NolanBecker/aima-python,phaller0513/aima-python,WmHHooper/aima-python,NolanBecker/aima-python,SnShine/aima-python,willhess/aima-python,SeanCameronConklin/aima-python,zayneanderson/aima-python,jottenlips/aima-python,AWPorter/aima-python,armadill-odyssey/aima-python,AmberJBlue/aima-python,reachtarunhere/aima-python,WmHHooper/aima-python,abbeymiles/aima-python,NolanBecker/aima-python,WhittKinley/ConnectProject,willhess/aima-python,austinban/aima-python,SimeonFritz/aima-python,aimacode/aima-python,WmHHooper/aima-python,grantvk/aima-python,Chipe1/aima-python,austinban/aima-python,armadill-odyssey/aima-python,SeanCameronConklin/aima-python,AmberJBlue/aima-python,grantvk/aima-python,JoeLaMartina/aima-python,Chipe1/aima-python,SimeonFritz/aima-python,WhittKinley/aima-python,sofmonk/aima-python,reachtarunhere/aima-python,Fruit-Snacks/aima-python,SimeonFritz/aima-python,abbeymiles/aima-python,chandlercr/aima-python,JoeLaMartina/aima-python,armadill-odyssey/aima-python,JamesDickenson/aima-python,jo-tez/aima-python,SnShine/aima-python,aimacode/aima-python | utils_test.py | utils_test.py | import pytest
from utils import *
# pytest suite for helpers in utils (Struct, removeall, count_if,
# argmax/argmin).

def test_struct_initialization():
    s = Struct(a=1, b=2)
    assert s.a == 1
    assert s.b == 2

def test_struct_assignment():
    s = Struct(a=1)
    s.a = 3
    assert s.a == 3

def test_removeall_list():
    assert removeall(4, []) == []
    assert removeall(4, [1,2,3,4]) == [1,2,3]

def test_removeall_string():
    assert removeall('s', '') == ''
    assert removeall('s', 'This is a test. Was a test.') == 'Thi i a tet. Wa a tet.'

def test_count_if():
    is_odd = lambda x: x % 2
    assert count_if(is_odd, []) == 0
    assert count_if(is_odd, [1, 2, 3, 4, 5]) == 3

def test_argmax():
    # argmax picks the element maximising the key; (-2)**2 = 4 > 1.
    assert argmax([-2, 1], lambda x: x**2) == -2

def test_argmin():
    assert argmin([-2, 1], lambda x: x**2) == 1

if __name__ == '__main__':
    pytest.main()
| mit | Python |
|
7581fbc397915c1ad72714203fee2349a84e14e9 | add notifiaction push script - pushNotif.py | jakdor/SSCAndroidApp,jakdor/SSCAndroidApp,jakdor/SSCAndroidApp | API/ssc/SscData/pushNotif.py | API/ssc/SscData/pushNotif.py | from urllib2 import *
import urllib
import json
import sys
# SECURITY NOTE(review): a live-looking GCM server key is hard-coded and
# committed; it should be revoked and loaded from the environment instead.
MY_API_KEY="AIzaSyCgSjnjxtYBGMOq7jNgnE_tbhpOJjU5nOo"

# Notification title and body come from the command line.
messageTitle = sys.argv[1]
messageBody = sys.argv[2]

# GCM payload: broadcast to every subscriber of the "sscapp" topic.
data={
    "to" : "/topics/sscapp",
    "notification" : {
        "body" : messageBody,
        "title" : messageTitle,
        "icon" : "notif_icon"
    }
}

dataAsJSON = json.dumps(data)

request = Request(
    "https://gcm-http.googleapis.com/gcm/send",
    dataAsJSON,
    { "Authorization" : "key="+MY_API_KEY,
      "Content-type" : "application/json"
    }
)

print urlopen(request).read()
| mit | Python |
|
8d5f3136fb737c8058d8b0bb4d866d1fe5bb3af8 | Add main function for specchio | brickgao/specchio | specchio/main.py | specchio/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
from watchdog.observers import Observer
from specchio.handlers import SpecchioEventHandler
from specchio.utils import logger
def main():
"""Main function for specchio
Example: specchio test/ user@host:test/
:return: None
"""
if len(sys.argv) == 2:
src_path = sys.argv[0].strip()
dst_ssh, dst_path = sys.argv[1].strip().split(":")
event_handler = SpecchioEventHandler(
src_path=src_path, dst_ssh=dst_path, dst_path=dst_path
)
logger.info("Initialize Specchio")
observer = Observer()
observer.schedule(event_handler, src_path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
else:
print """Specchio is a tool that can help you rsync your file
it use `.gitignore` in git to mark which file is ignored.
Usage: specchio src/ user@host:dst"""
| mit | Python |
|
e487ca21da9e7b62a860b91aadfecdf36df005a2 | add public templates module | paolodragone/PyMzn | pymzn/templates.py | pymzn/templates.py |
from .mzn import templates as _templates
from .mzn.templates import *
__all__ = _templates.__all__
| mit | Python |
|
1019f866fc0e9c16ccbe726b4b21265dbfc1ac68 | Add search_rotated_sorted_array.py | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | data_structures/sorting/search_rotated_sorted_array.py | data_structures/sorting/search_rotated_sorted_array.py | # Search in a Rotated Sorted Array
# You are given a sorted array which is rotated at some random pivot point.
#
# Example: [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]
#
# You are given a target value to search. If found in the array return its index, otherwise return -1.
#
# You can assume there are no duplicates in the array and your algorithm's runtime complexity
# must be in the order of O(log n).
#
# Example:
#
# Input: nums = [4,5,6,7,0,1,2], target = 0, Output: 4
#
# Here is some boilerplate code and test cases to start with:
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array

    Args:
       input_list(array), number(int): Input array to search and the target
    Returns:
       int: Index or -1
    """
    lo, hi = 0, len(input_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = input_list[mid]
        if pivot == number:
            return mid
        if input_list[lo] <= pivot:
            # Left half [lo, mid] is sorted: binary-search it if the
            # target lies inside, otherwise continue on the right half.
            if input_list[lo] <= number < pivot:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # Right half [mid, hi] is sorted.
            if pivot < number <= input_list[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1
def linear_search(input_list, number):
    """Reference O(n) search: index of *number* in *input_list*, or -1."""
    matches = (idx for idx, value in enumerate(input_list) if value == number)
    return next(matches, -1)
def test_function(test_case):
    """Print Pass/Fail by comparing the binary search with linear search."""
    arr, target = test_case
    expected = linear_search(arr, target)
    actual = rotated_array_search(arr, target)
    print("Pass" if expected == actual else "Fail")
# Smoke tests: rotated arrays with hits and misses, plus empty and
# single-element edge cases.
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 8])
test_function([[6, 7, 8, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 10])
test_function([[], 0])
test_function([[88], 88])
test_function([[], None])
|
ffc1b443f13672d0a4002a38f5273b5f72cdb627 | Solve Even Fibonacci numbers | rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank | python/euler002.py | python/euler002.py | #!/bin/python3
# Project Euler #2: Even Fibonacci numbers
def fibonacci_sequence(n):
    """Return the Fibonacci terms 1, 2, 3, 5, ... strictly below n.

    Edge-case fix: the original seeded the result with [1, 2] and could
    return terms >= n for n <= 2; this version returns [] for n <= 1 and
    [1] for n == 2, and is identical for larger n.
    """
    sequence = []
    a, b = 1, 2
    while a < n:
        sequence.append(a)
        a, b = b, a + b
    return sequence
def evens(array):
    """Return the even elements of *array*, order preserved."""
    return [value for value in array if value % 2 == 0]
# HackerRank driver: for each test case read n and print the sum of the
# even Fibonacci terms below n.
test_cases = int(input().strip())
for _ in range(test_cases):
    n = int(input().strip())
    print(sum(evens(fibonacci_sequence(n))))
| mit | Python |
|
dc5aad16e63ff210aa3770f6eae18f215f78f8ce | Create 03.py | ezralalonde/cloaked-octo-sansa | 01/hw/03.py | 01/hw/03.py | # Given the variables s and t defined as:
s = 'udacity'
t = 'bodacious'

# write Python code that prints out udacious
# without using any quote characters in
# your code.
# 'uda' (first 3 chars of s) + 'cious' (t from index 4 on).
print s[:3] + t[4:]
| bsd-2-clause | Python |
|
616e656cb9390321cb36d8f1b067d0bddaff11c2 | Add cli argument parser | frigg/frigg-worker | frigg/worker/cli.py | frigg/worker/cli.py | # -*- coding: utf8 -*-
from fabric import colors
from frigg.worker.fetcher import fetcher
class Commands(object):
    """CLI sub-commands for the frigg worker; one static method each."""

    @staticmethod
    def start():
        # Launch the build fetcher loop (blocks).
        print(colors.green("Starting frigg worker"))
        fetcher()

    @staticmethod
    def unknown_command():
        print(colors.red("Unknown command"))

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Do some work for frigg.')
    parser.add_argument('command')

    args = parser.parse_args()
    # Dispatch by name; unrecognised commands fall back to
    # unknown_command.
    getattr(Commands, args.command, Commands.unknown_command)()
|
80caf160aba107f539d18287a09fc30d6cf3d0a1 | add demo plotting the available 1D demo signals | PyWavelets/pywt,rgommers/pywt,rgommers/pywt,rgommers/pywt,rgommers/pywt,grlee77/pywt,grlee77/pywt,PyWavelets/pywt | demo/plot_demo_signals.py | demo/plot_demo_signals.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Plot the set of 1D demo signals available in `pywt.data.demo_signal`."""
import numpy as np
import matplotlib.pyplot as plt
import pywt
# use 'list' to get a list of all available 1d demo signals
signals = pywt.data.demo_signal('list')

subplots_per_fig = 5
signal_length = 1024

# NOTE(review): this initial i_fig = 0 is redundant -- the for loop
# below rebinds it.
i_fig = 0
n_figures = int(np.ceil(len(signals)/subplots_per_fig))
for i_fig in range(n_figures):
    # Select a subset of functions for the current plot
    func_subset = signals[
        i_fig * subplots_per_fig:(i_fig + 1) * subplots_per_fig]

    # create a figure to hold this subset of the functions
    fig, axes = plt.subplots(subplots_per_fig, 1)
    axes = axes.ravel()
    for n, signal in enumerate(func_subset):
        if signal in ['Gabor', 'sineoneoverx']:
            # user cannot specify a length for these two
            x = pywt.data.demo_signal(signal)
        else:
            x = pywt.data.demo_signal(signal, signal_length)
        ax = axes[n]
        ax.plot(x.real)
        if signal == 'Gabor':
            # The Gabor signal is complex-valued
            ax.plot(x.imag)
            ax.legend(['Gabor (Re)', 'Gabor (Im)'], loc='upper left')
        else:
            ax.legend([signal, ], loc='upper left')

    # omit axes for any unused subplots
    for n in range(n + 1, len(axes)):
        axes[n].set_axis_off()
plt.show()
| mit | Python |
|
4a30d30b82fbdccbb0f15ebb5c094b13ce791f7f | Add a utility class to normalize input | davidmogar/genderator | genderator/utils.py | genderator/utils.py | from unidecode import unidecode
class Normalizer:
    """Stateless helpers that canonicalise a name string."""

    # FIX: `normalize` had neither `self` nor @staticmethod, so calling
    # it on an instance raised TypeError; it is a static utility like its
    # siblings.
    @staticmethod
    def normalize(text):
        """Collapse whitespace, turn hyphens into spaces and lower-case."""
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        """Replace every hyphen with a space."""
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        """Trim and collapse runs of whitespace to single spaces."""
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        """Transliterate accented characters to plain ASCII (unidecode)."""
        return unidecode(text)
|
8b828e9c9daacd8bd6b5719e0ee50fc93f3c612d | add line-invoker, allows pipeline to be changed on the fly | aliclark/irctail,aliclark/irctail | line-invoker.py | line-invoker.py | #!/usr/bin/python
from __future__ import print_function
import sys
import subprocess
# A normal(ish) pipeline looks like the following:
# tailf input | grep -v foo | grep bar | cat >>output
# If we want to change the valu "foo", "bar" or otherwise change the
# pipeline, we have to kill the old pipeline and start a new one.
# This script changes the above to
# tailf input | line-invoker.py mypipeline.sh | cat >>output
# where mypipeline.sh contains:
# grep -v foo | grep bar
# This allows the pipeline to be edited at will, without breaking the
# tailf and potentially having missed lines, or duplicated them on
# restarting tailf
def main():
    # The single argument is the pipeline script to (re-)invoke; a fresh
    # subprocess is spawned per input line so the pipeline file can be
    # edited between lines without restarting the upstream tailf.
    prog = sys.argv[1]

    try:
        line = sys.stdin.readline()
        while line:
            p = subprocess.Popen(prog, stdin=subprocess.PIPE)
            p.stdin.write(line)
            # Closing stdin lets the child see EOF and finish its work.
            p.stdin.close()
            sys.stdout.flush()
            line = sys.stdin.readline()
    except KeyboardInterrupt:
        # Ctrl-C just ends the loop quietly.
        pass

if __name__ == '__main__':
    main()
| isc | Python |
|
917708a749e2c9519cbb9841004a18eeff788af4 | Concatenate Final | Rahulsharma0810/M3u8Parser | Concatenate.py | Concatenate.py | # Copyright (c) 2017 Rahul V Sharma
# AUTHORS = Rahul Vinod Shaarma
# Website = www.rahul-sharma.com
# Email = sharmaR0810@gmail.com
# Don't Message Me unless Serios Help or you are not a hot girl.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# 720p = 1280 x 720
# 1080p = 1920 x 1080
# 1440p = 2560 x 1440
# 2160p = 3840 x 2160
# 4320p = 7680 x 4320
# Downloadding Master Playlist as Master
import urllib.request
import re
import os

# Make sure the folder for the final playlist exists.
if not os.path.exists("MasterPlaylist"):
    os.makedirs("MasterPlaylist")

# Step 1: download the master (variant) playlist as 01_Master.m3u.
master_url = ('https://player.vimeo.com/external/159463108.m3u8'
              '?s=d41bea2a0d7223e3bd161fcb549b2c668437f1c9'
              '&oauth2_token_id=410160086')
urllib.request.urlretrieve(master_url, "01_Master.m3u")

# Step 2: locate the 1920x1080 (1080p) variant; its URL sits on the line
# immediately after the #EXT-X-STREAM-INF line carrying the RESOLUTION.
# Fix: the original never closed this (or the later) file handles; `with`
# guarantees they are closed even on error.
with open("01_Master.m3u", 'r') as master_file:
    master_text = master_file.read()
stream_inf, variant_url = re.findall(
    r'(.*RESOLUTION=1920x1080.*)[\r\n]+([^\r\n]+)', master_text)[0]

# Step 3: keep a copy of the variant URL in 02_Master.m3u.
with open('02_Master.m3u', 'w') as url_file:
    url_file.write(variant_url)

# Step 4: download the variant (segment) playlist as 03_Master.m3u.
urllib.request.urlretrieve(variant_url, "03_Master.m3u")

# Step 5: derive the base URL (everything up to the last '/') from the
# saved variant URL and keep it in 04_Master.m3u.
with open('02_Master.m3u', 'r') as url_file:
    saved_url = url_file.read()
base_url = re.findall(r'.*\/', saved_url)[0]
with open('04_Master.m3u', 'w') as base_file:
    base_file.write(base_url)

# Step 6: read the base URL back and collect the .ts segment names from
# the variant playlist.
with open('04_Master.m3u', 'r') as base_file:
    base_url = base_file.read()
with open('03_Master.m3u', 'r') as playlist:
    ts_filenames = [line.rstrip() for line in playlist
                    if line.rstrip().endswith('.ts')]

# Step 7: write the final playlist with one absolute URL per segment.
with open('MasterPlaylist/01_Master.m3u8', 'w') as final_playlist:
    for ts_name in ts_filenames:
        final_playlist.write(base_url)
        final_playlist.write(ts_name)
        final_playlist.write("\n")

# Step 8: remove the intermediate files.
for temp_name in ('01_Master.m3u', '02_Master.m3u',
                  '03_Master.m3u', '04_Master.m3u'):
    os.remove(temp_name)
| mit | Python |
|
80cb11187894870ba9fe40e09834522d7ea2ee10 | Create middleware.py | mpetyx/psymbiosys-rapidapps-middleware-workersTool,mpetyx/psymbiosys-rapidapps-middleware-workersTool | middleware.py | middleware.py | mit | Python |
||
aadd5b5d60e1fa2939482790baa893d9624ad33b | Create mnist_lstm.py | EderSantana/TwistedFate | mnist_lstm.py | mnist_lstm.py | from tensorflow.models.rnn import rnn_cell, rnn
import tensorflow as tf
import numpy as np
import input_data
# Global TF session; get_lstm() uses it to run variable initialization.
sess = tf.Session()
'''
Classify MNIST using LSTM running row by row.
Good:
* No compilation time at all, which is cool.
Bad:
* Problem is that has all dimensions hard coded, which sucks.
Inspired by:
https://github.com/nlintz/TensorFlow-Tutorials
'''
def init_weights(shape):
    # Weights/biases drawn from a small random normal distribution.
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
def get_lstm(num_steps, input_dim, hidden_dim, output_dim, batch_size):
    """Build a fixed-size LSTM classifier graph.

    Each sequence step is projected to hidden_dim, run through a
    BasicLSTMCell unrolled for num_steps, and the last step's output is
    projected to output_dim.

    Returns (input placeholder, output logits, desired-labels placeholder).
    All dimensions are hard-wired at graph-construction time.
    """
    # Define input: one batch of sequences plus their one-hot labels.
    input = tf.placeholder("float", [batch_size, num_steps, input_dim])
    desired = tf.placeholder("float", [batch_size, 10])
    # Define parameters: input->hidden and hidden->output projections.
    i2h = init_weights([input_dim, hidden_dim])
    h2o = init_weights([hidden_dim, output_dim])
    bi = init_weights([hidden_dim])
    bo = init_weights([output_dim])
    # prepare input
    # input shape: (batches, num_steps, input_dim)
    X2 = tf.transpose(input, [1, 0, 2])  # (num_steps, batch_size, input_dim)
    # tf.reshape does not accept X.get_shape elements as input :(
    # Bug fix: the original referenced an undefined name `dim` here, which
    # raised NameError at graph construction; the correct size is input_dim.
    X3 = tf.reshape(X2, [num_steps*batch_size, input_dim])  # (num_steps*batch_size, input_dim)
    # project to hidden state dimension
    X4 = tf.matmul(X3, i2h) + bi  # (num_steps*batch_size, hidden_dim)
    # The rnn for-loop expects a list of (batch_size, hidden_dim) tensors,
    # so slice X4 along axis 0 into num_steps pieces.
    Xh = tf.split(0, num_steps, X4)
    initializer = tf.random_uniform_initializer(-.01, .01)
    # INNER LOOP -- rnn.rnn unrolls the cell over the num_steps inputs.
    with tf.variable_scope("RNN", reuse=None, initializer=initializer):
        lstm_cell = rnn_cell.BasicLSTMCell(hidden_dim, forget_bias=1.0)
        initial_state = lstm_cell.zero_state(batch_size, tf.float32)
        lstm_outputs, lstm_states = rnn.rnn(lstm_cell, Xh, initial_state=initial_state)
        # NOTE(review): initialization is run inside the scope because it
        # reportedly did not work outside it for the original author.
        sess.run(tf.initialize_all_variables())
    # calculate output from the last time step only.
    Y = lstm_outputs[-1]
    output = tf.matmul(Y, h2o) + bo
    return input, output, desired
| mit | Python |
|
940c4f4238eac31f926e520dba473819abb44033 | Add a moksha.hub module with an initial OrbitedWidget | pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,ralphbean/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,lmacken/moksha,lmacken/moksha,ralphbean/moksha | moksha/hub.py | moksha/hub.py | # This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
"""
The Moksha Real-time Hub
"""
from tw.api import Widget, JSLink, js_callback, js_function
# @@ Make the orbited url globally configurable
ORBITED_URL = 'http://localhost:9000'
# Client-side Orbited library served by the Orbited daemon itself.
orbited_js = JSLink(link=ORBITED_URL + '/static/Orbited.js')
class OrbitedWidget(Widget):
    """ToscaWidgets widget that opens an Orbited TCPSocket connection.

    The on* params are JavaScript callbacks injected verbatim into the
    template; they default to no-ops.
    """
    params = {
        'onopen': 'A javascript callback for when the connection opens',
        'onread': 'A javascript callback for when new data is read',
        'onclose': 'A javascript callback for when the connection closes',
    }
    javascript = [orbited_js]
    # Default callbacks do nothing.
    onopen = onread = onclose = js_callback('function(){}')
    # NOTE(review): host/port are hard-coded both here and in ORBITED_URL;
    # they should come from configuration (see the @@ note above).
    template = """
        <script type="text/javascript">
            Orbited.settings.port = 9000
            Orbited.settings.hostname = 'localhost'
            document.domain = document.domain
            TCPSocket = Orbited.TCPSocket
            connect = function() {
                conn = new TCPSocket()
                conn.onread = ${onread}
                conn.onopen = ${onopen}
                conn.onclose = ${onclose}
                conn.open('localhost', 9000)
            }
            $(document).ready(function() {
                connect()
            });
        </script>
    """
| apache-2.0 | Python |
|
7780c235f0f357ab918f0c031e7dc51f6ca072a9 | Solve problem 20 | mazayus/ProjectEuler | problem020.py | problem020.py | #!/usr/bin/env python3
from functools import *
import operator
def factorial(number):
    """Return number! for number >= 1."""
    assert number >= 1
    product = 1
    for factor in range(2, number + 1):
        product *= factor
    return product
def digits(number):
    """Return an iterator over the decimal digits of number, most
    significant first."""
    return (int(ch) for ch in str(number))
# Project Euler #20: the sum of the decimal digits of 100!.
print(sum(digits(factorial(100))))
| mit | Python |
|
6fcb3adbcf85aa8039274f59d2b26401b5927fc4 | Create PowerofFour_001.py | cc13ny/algo,Chasego/codirit,Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/codirit,Chasego/codirit,Chasego/codi,Chasego/codirit,Chasego/cod,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/codi,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/cod,cc13ny/algo | kargtom/twodim/PowerofFour/PowerofFour_001.py | kargtom/twodim/PowerofFour/PowerofFour_001.py | def isPowerOfFour(n):
return n > 0 and n & n - 1 is 0 and n & 0x5555555555555555 != 0
| mit | Python |
|
8a7fda2acf57c135e7f401ebdd8f71c3609c0eca | Create tries.py | saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA | Python/tries.py | Python/tries.py | def make_trie(*args):
trie={}
for word in args:
if type(word)!= str:
raise TypeError("Trie work only on strings")
# temp_trie and trie refer to the same dictionary object.
temp_trie=trie
for letter in word:
# here setdefault sets the letter to {}({'y':{}}) and then returns {} to temp_trie.
# So now temp_trie contains {} but trie points to ({'y':{}}).
# setdefault assigns the letter their value and returns {}
# That is why nesting takes place.
temp_trie=temp_trie.setdefault(letter,{})
temp_trie=temp_trie.setdefault('__end__','__end__')
return trie
def in_trie(trie, word):
    """Return True if `word` is stored as a complete word in `trie`.

    Prefixes of stored words that were not inserted themselves return
    False.  Raises TypeError if `word` is not a string.
    """
    # Fix: isinstance instead of an exact-type comparison.
    if not isinstance(word, str):
        raise TypeError("Trie work only on strings")
    node = trie
    for letter in word:
        if letter not in node:
            return False
        node = node[letter]
    # Only nodes carrying the sentinel terminate a stored word.
    return '__end__' in node
def remove(trie, word, depth):
    """Recursively delete `word` from the nested-dict trie.

    `depth` is the index of the character currently being examined; the
    initial call should pass the root dict with depth 0.

    Returns True when the caller's subtree became empty and may itself be
    deleted, False otherwise.  NOTE(review): the sibling/prefix handling
    below mixes deletion with the return value in a fragile way --
    confirm the intended semantics before relying on the return value.
    """
    # Bail out if the current character is not present at this level.
    if word and word[depth] not in trie:
        return False
    if len(word) == depth + 1:
        # Last character of the word: drop the end-of-word marker.
        if '__end__' in trie[word[depth]]:
            del trie[word[depth]]['__end__']
        if len(trie[word[depth]]) > 0 and len(trie) > 1:
            # Node still has children (it is a prefix of other words).
            return False
        elif len(trie) > 1 :
            # Node emptied but this level has siblings: delete just it.
            del trie[word[depth]]
            return False
        elif len(trie[word[depth]]) > 0:
            return False
        else:
            # Node and level are both otherwise empty.
            return True
    else:
        temp_trie = trie
        # Recursively climb up to delete.
        if remove(temp_trie[word[depth]], word, depth + 1):
            if temp_trie:
                del temp_trie[word[depth]]
            return not temp_trie
        else:
            return False
# Smoke test (Python 2 print statements): build a trie and probe it.
trie=make_trie('hack','hackerrank')
print trie
print in_trie(trie,'hac')
print trie
| mit | Python |
|
23cf747a3ff24f75d3300547f4bfdecf10c4a325 | Add next traversal util function | scrappleapp/scrapple,AlexMathew/scrapple,AlexMathew/scrapple,scrappleapp/scrapple,AlexMathew/scrapple | scrapple/utils/config.py | scrapple/utils/config.py | """
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
def traverse_next(page, next, results):
for link in page.extract_links(next['follow_link']):
print("Loading page", link.url)
r = results
for attribute in next['scraping'].get('data'):
if attribute['field'] != "":
print("\nExtracting", attribute['field'], "attribute", sep=' ')
r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr'])
if not next['scraping'].get('next'):
yield r
else:
for next2 in next['scraping'].get('next'):
for result in traverse_next(link, next2, r):
yield result
| mit | Python |
|
56b3cf07fff4d3794dcdbf99f6d7faa629fa243e | fix string manipulation in render_templatefile() | cleydson/scrapy,hansenDise/scrapy,tliber/scrapy,rolando/scrapy,eliasdorneles/scrapy,kmike/scrapy,tagatac/scrapy,starrify/scrapy,jdemaeyer/scrapy,mlyundin/scrapy,carlosp420/scrapy,finfish/scrapy,barraponto/scrapy,Zephor5/scrapy,Zephor5/scrapy,hectoruelo/scrapy,taito/scrapy,redapple/scrapy,rootAvish/scrapy,shaform/scrapy,cyberplant/scrapy,ssh-odoo/scrapy,rolando/scrapy,pablohoffman/scrapy,nfunato/scrapy,mgedmin/scrapy,ssteo/scrapy,kimimj/scrapy,crasker/scrapy,olafdietsche/scrapy,scrapy/scrapy,cyberplant/scrapy,zorojean/scrapy,shaform/scrapy,rklabs/scrapy,GregoryVigoTorres/scrapy,cleydson/scrapy,umrashrf/scrapy,zackslash/scrapy,elacuesta/scrapy,wangjun/scrapy,wenyu1001/scrapy,Parlin-Galanodel/scrapy,Bourneer/scrapy,cyrixhero/scrapy,starrify/scrapy,johnardavies/scrapy,umrashrf/scrapy,YeelerG/scrapy,ArturGaspar/scrapy,irwinlove/scrapy,pawelmhm/scrapy,1yvT0s/scrapy,wenyu1001/scrapy,lacrazyboy/scrapy,dacjames/scrapy,jeffreyjinfeng/scrapy,haiiiiiyun/scrapy,hectoruelo/scrapy,rootAvish/scrapy,taito/scrapy,pawelmhm/scrapy,w495/scrapy,moraesnicol/scrapy,Bourneer/scrapy,crasker/scrapy,ArturGaspar/scrapy,fafaman/scrapy,cyberplant/scrapy,dracony/scrapy,tagatac/scrapy,pawelmhm/scrapy,starrify/scrapy,Allianzcortex/scrapy,Preetwinder/scrapy,agreen/scrapy,rolando-contrib/scrapy,Preetwinder/scrapy,Chenmxs/scrapy,Parlin-Galanodel/scrapy,Cnfc19932/scrapy,ENjOyAbLE1991/scrapy,fpy171/scrapy,wujuguang/scrapy,darkrho/scrapy-scrapy,Zephor5/scrapy,carlosp420/scrapy,z-fork/scrapy,kazitanvirahsan/scrapy,Lucifer-Kim/scrapy,rahulsharma1991/scrapy,scrapy/scrapy,zjuwangg/scrapy,Preetwinder/scrapy,z-fork/scrapy,github-account-because-they-want-it/scrapy,Parlin-Galanodel/scrapy,kashyap32/scrapy,zorojean/scrapy,Cnfc19932/scrapy,darkrho/scrapy-scrapy,nowopen/scrapy,Adai0808/scrapy-1,cleydson/scrapy,Timeship/scrapy,ssh-odoo/scrapy,JacobStevenR/scrapy,rahulsharma1991/scrapy,mlyundin/scrapy,cursesun/scrapy,Digenis
/scrapy,moraesnicol/scrapy,johnardavies/scrapy,dangra/scrapy,eliasdorneles/scrapy,dracony/scrapy,arush0311/scrapy,wangjun/scrapy,Lucifer-Kim/scrapy,elacuesta/scrapy,JacobStevenR/scrapy,ENjOyAbLE1991/scrapy,jc0n/scrapy,ssh-odoo/scrapy,ssteo/scrapy,wujuguang/scrapy,avtoritet/scrapy,avtoritet/scrapy,Lucifer-Kim/scrapy,finfish/scrapy,eLRuLL/scrapy,irwinlove/scrapy,ssteo/scrapy,dangra/scrapy,tagatac/scrapy,dgillis/scrapy,kmike/scrapy,redapple/scrapy,KublaikhanGeek/scrapy,shaform/scrapy,jeffreyjinfeng/scrapy,nfunato/scrapy,yidongliu/scrapy,huoxudong125/scrapy,Ryezhang/scrapy,huoxudong125/scrapy,w495/scrapy,jeffreyjinfeng/scrapy,nowopen/scrapy,Ryezhang/scrapy,irwinlove/scrapy,cyrixhero/scrapy,GregoryVigoTorres/scrapy,umrashrf/scrapy,zjuwangg/scrapy,haiiiiiyun/scrapy,TarasRudnyk/scrapy,kazitanvirahsan/scrapy,Chenmxs/scrapy,johnardavies/scrapy,tliber/scrapy,ArturGaspar/scrapy,xiao26/scrapy,cursesun/scrapy,hansenDise/scrapy,yidongliu/scrapy,TarasRudnyk/scrapy,z-fork/scrapy,avtoritet/scrapy,hectoruelo/scrapy,zjuwangg/scrapy,yidongliu/scrapy,Timeship/scrapy,csalazar/scrapy,YeelerG/scrapy,github-account-because-they-want-it/scrapy,crasker/scrapy,KublaikhanGeek/scrapy,pablohoffman/scrapy,zackslash/scrapy,wenyu1001/scrapy,olafdietsche/scrapy,songfj/scrapy,pombredanne/scrapy,mgedmin/scrapy,cursesun/scrapy,nikgr95/scrapy,Allianzcortex/scrapy,jdemaeyer/scrapy,livepy/scrapy,Chenmxs/scrapy,livepy/scrapy,darkrho/scrapy-scrapy,Cnfc19932/scrapy,carlosp420/scrapy,jc0n/scrapy,dacjames/scrapy,csalazar/scrapy,rolando-contrib/scrapy,kimimj/scrapy,huoxudong125/scrapy,taito/scrapy,kimimj/scrapy,famorted/scrapy,famorted/scrapy,nfunato/scrapy,eLRuLL/scrapy,pombredanne/scrapy,1yvT0s/scrapy,mlyundin/scrapy,Adai0808/scrapy-1,ENjOyAbLE1991/scrapy,foromer4/scrapy,raphaelfruneaux/scrapy,Adai0808/scrapy-1,zackslash/scrapy,fafaman/scrapy,1yvT0s/scrapy,moraesnicol/scrapy,Digenis/scrapy,w495/scrapy,zorojean/scrapy,Timeship/scrapy,fpy171/scrapy,yarikoptic/scrapy,scrapy/scrapy,eliasdorneles/scrapy,raphaelfrun
eaux/scrapy,foromer4/scrapy,rklabs/scrapy,Digenis/scrapy,barraponto/scrapy,nowopen/scrapy,kashyap32/scrapy,pombredanne/scrapy,kmike/scrapy,dgillis/scrapy,dgillis/scrapy,rolando-contrib/scrapy,eLRuLL/scrapy,nikgr95/scrapy,agreen/scrapy,GregoryVigoTorres/scrapy,YeelerG/scrapy,finfish/scrapy,mgedmin/scrapy,xiao26/scrapy,arush0311/scrapy,elacuesta/scrapy,JacobStevenR/scrapy,agreen/scrapy,yarikoptic/scrapy,xiao26/scrapy,Ryezhang/scrapy,livepy/scrapy,olafdietsche/scrapy,github-account-because-they-want-it/scrapy,Bourneer/scrapy,rklabs/scrapy,lacrazyboy/scrapy,rootAvish/scrapy,hansenDise/scrapy,jdemaeyer/scrapy,rahulsharma1991/scrapy,csalazar/scrapy,arush0311/scrapy,wangjun/scrapy,fpy171/scrapy,jc0n/scrapy,kazitanvirahsan/scrapy,barraponto/scrapy,tliber/scrapy,nikgr95/scrapy,yarikoptic/scrapy,pablohoffman/scrapy,kashyap32/scrapy,dracony/scrapy,cyrixhero/scrapy,foromer4/scrapy,dacjames/scrapy,haiiiiiyun/scrapy,raphaelfruneaux/scrapy,famorted/scrapy,lacrazyboy/scrapy,wujuguang/scrapy,redapple/scrapy,KublaikhanGeek/scrapy,TarasRudnyk/scrapy,rolando/scrapy,songfj/scrapy,Allianzcortex/scrapy,songfj/scrapy,fafaman/scrapy,dangra/scrapy | scrapy/utils/template.py | scrapy/utils/template.py | """Helper functions for working with templates"""
import os
import re
import string
def render_templatefile(path, **kwargs):
with open(path, 'rb') as file:
raw = file.read()
content = string.Template(raw).substitute(**kwargs)
render_path = path[:-len('.tmpl')] if path.endswith('.tmpl') else path
with open(render_path, 'wb') as file:
file.write(content)
if path.endswith('.tmpl'):
os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]')
def string_camelcase(string):
""" Convert a word to its CamelCase version and remove invalid chars
>>> string_camelcase('lost-pound')
'LostPound'
>>> string_camelcase('missing_images')
'MissingImages'
"""
return CAMELCASE_INVALID_CHARS.sub('', string.title())
| """Helper functions for working with templates"""
import os
import re
import string
def render_templatefile(path, **kwargs):
with open(path, 'rb') as file:
raw = file.read()
content = string.Template(raw).substitute(**kwargs)
with open(path.rstrip('.tmpl'), 'wb') as file:
file.write(content)
if path.endswith('.tmpl'):
os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]')
def string_camelcase(string):
""" Convert a word to its CamelCase version and remove invalid chars
>>> string_camelcase('lost-pound')
'LostPound'
>>> string_camelcase('missing_images')
'MissingImages'
"""
return CAMELCASE_INVALID_CHARS.sub('', string.title())
| bsd-3-clause | Python |
cefdd80e7cd9e4ce007e60c08114e89a46b15de7 | Truncate a protein sequence to remove signal peptide. | tdangkhoa/calford | RemoveSignal.py | RemoveSignal.py | #!/usr/bin/python
# Copyright (c) 2014 Khoa Tran. All rights reserved.
from CalFord import *
import argparse
import sys,os
import re
signalFile = None
configPath = "calford.conf"
noSignalOutputFile = None
removedSignalOutputFile = None
def argsSanityCheck():
isOk = True
if not os.path.isfile(signalFile):
print "Error: cannot find %s"%signalFile
isOk = False
return isOk
def parseArgs():
global configPath
global signalFile
global noSignalOutputFile
global removedSignalOutputFile
parser = argparse.ArgumentParser(
description="Read the protein signal file and generate two FASTA file: "\
"one file contains proteins without signal, the other one "\
"contains processed proteins, whose signal sequence has been "\
"truncated.")
parser.add_argument("signalFile",help="input protein signal analysis result")
parser.add_argument("--config",help="path to config file",
nargs=1)
parser.add_argument("--outputNoSignal",help="write protein without signal to "\
"this file",
nargs=1,required=True)
parser.add_argument("--outputTruncatedSignal",help="write protein with signal "\
"sequence truncated to this file",
nargs=1,required=True)
args = parser.parse_args()
signalFile = args.signalFile
noSignalOutputFile = args.outputNoSignal[0]
removedSignalOutputFile = args.outputTruncatedSignal[0]
if args.config!=None:
configPath = args.config[0]
if not argsSanityCheck():
print
exit(1)
def loadSignalAnalysis(path):
TRACE5("Load signal result from %s"%path)
signalData = {}
noSignalCount = 0
noCleaveCount = 0
truncatedCount = 0
try:
f = open(path,'r')
signalRe = re.compile('(\S+)\s+(Signal .*)')
cleaveRe = re.compile('after AA (\d+)')
for line in f:
m = signalRe.match(line)
if m==None:
# no signal found
noSignalCount += 1
continue
pid = m.group(1)
m2 = cleaveRe.search(m.group(2))
if m2==None:
signalData[pid] = 0
noCleaveCount += 1
else:
signalData[pid] = int(m2.group(1))
truncatedCount += 1
f.close()
TRACE9("Found %d proteins with no signal, %d proteins with no cleave location "\
"and %d proteins has been truncated"\
%(noSignalCount,noCleaveCount,truncatedCount))
except IOError,e:
print "Error reading signal file: %s"%str(e)
return None
return signalData
def writeNoSignalProtein(fastaDb,data):
TRACE5("Writing no signal proteins to output file at %s"%noSignalOutputFile)
try:
f = open(noSignalOutputFile,'w')
except IOError,e:
print "Error writing no signal output file: %s"%str(e)
return
for p in fastaDb:
if p not in data:
f.write("%s\n"%fastaDb[p])
f.close()
def renameProtein(proteinDesc, suffix='.nosignal'):
    """Extract the protein id from a FASTA header line (">id desc") and
    return it with `suffix` appended, or None when the line cannot be
    parsed."""
    match = re.match('>(\S+)\s+(.*)', proteinDesc)
    if match is None:
        TRACE0("Cannot parse protein desc: %s"%proteinDesc)
        return None
    return match.group(1) + suffix
def truncateSignalProtein(fastaDb, data):
    """Cut the signal peptide off every protein listed in `data`.

    `data` maps protein id -> cleavage position (0 means keep the whole
    sequence).  Returns {renamed_id: ">renamed_id\\nsequence"}.

    NOTE(review): `s[1]` assumes each fastaDb value is ">id desc\\nSEQ"
    with the entire sequence on one line; a multi-line FASTA record would
    silently lose residues -- confirm loadFasta's output format.
    """
    TRACE5("Truncate signal proteins")
    result = {}
    for pid in data:
        loc = data[pid]
        if pid not in fastaDb:
            TRACE0("Error: cannot find %s in FASTA database"%pid)
            continue
        p = fastaDb[pid]
        s = p.split('\n')
        # Rename so truncated records are distinguishable (id + suffix).
        newPid = renameProtein(s[0])
        if newPid==None:
            continue
        seq = s[1]
        # NOTE(review): an out-of-range cleavage site is only logged; the
        # truncation below still runs and yields an empty sequence.
        if loc>=len(seq):
            TRACE0("Error: cleaved location %d is larger than sequence len (%d)"\
                %(loc,len(seq)))
        seq = seq[loc:]
        result[newPid] = ">"+newPid+"\n"+seq
    return result
def writeTruncatedProtein(data):
TRACE5("Writing truncated signal proteins to output file at %s"%removedSignalOutputFile)
try:
f = open(removedSignalOutputFile,'w')
except IOError,e:
print "Error writing truncated signal output file: %s"%str(e)
return
for p in data:
f.write("%s\n"%data[p])
f.close()
# --- Script entry point ---------------------------------------------------
# Parse arguments and config, load the FASTA database and the signal
# predictions, then emit the two output files.
parseArgs()
parseConfigFile(configPath)
print "Write no signal proteins to: %s"%noSignalOutputFile
print "Write truncated signal proteins to: %s"%removedSignalOutputFile
fastaDb = loadFasta(config['database'])
if fastaDb==None:
    # error
    print "Error: load FASTA file error"
    exit(1)
signalData = loadSignalAnalysis(signalFile)
if signalData==None:
    # error
    exit(1)
truncatedDb = truncateSignalProtein(fastaDb,signalData)
writeNoSignalProtein(fastaDb,signalData)
writeTruncatedProtein(truncatedDb)
| mit | Python |
|
fd8b325bb6423c2f56d84006763ec8f6696a2745 | Test basic paths | Kozea/WeasyPrint,Kozea/WeasyPrint | tests/test_draw/svg/test_paths.py | tests/test_draw/svg/test_paths.py | """
weasyprint.tests.test_draw.svg.test_paths
------------------------------------------
Test how SVG simple paths are drawn.
"""
from ...testing_utils import assert_no_logs
from .. import assert_pixels
@assert_no_logs
def test_path_Hh():
assert_pixels('path_Hh', 10, 10, '''
BBBBBBBB__
BBBBBBBB__
__________
RRRRRRRR__
RRRRRRRR__
__________
GGGGGGGG__
GGGGGGGG__
BBBBBBBB__
BBBBBBBB__
''', '''
<style>
@page { size: 10px }
svg { display: block }
</style>
<svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
<path d="M 0 1 H 8 H 1"
stroke="blue" stroke-width="2" fill="none"/>
<path d="M 0 4 H 8 4"
stroke="red" stroke-width="2" fill="none"/>
<path d="M 0 7 h 8 h 0"
stroke="lime" stroke-width="2" fill="none"/>
<path d="M 0 9 h 8 0"
stroke="blue" stroke-width="2" fill="none"/>
</svg>
''')
@assert_no_logs
def test_path_Vv():
assert_pixels('path_Vv', 10, 10, '''
BB____GG__
BB____GG__
BB____GG__
BB____GG__
___RR_____
___RR_____
___RR___BB
___RR___BB
___RR___BB
___RR___BB
''', '''
<style>
@page { size: 10px }
svg { display: block }
</style>
<svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
<path d="M 1 0 V 1 V 4"
stroke="blue" stroke-width="2" fill="none"/>
<path d="M 4 6 V 4 10"
stroke="red" stroke-width="2" fill="none"/>
<path d="M 7 0 v 0 v 4"
stroke="lime" stroke-width="2" fill="none"/>
<path d="M 9 6 v 0 4"
stroke="blue" stroke-width="2" fill="none"/>
</svg>
''')
@assert_no_logs
def test_path_Ll():
assert_pixels('path_Ll', 10, 10, '''
______RR__
______RR__
______RR__
___BB_RR__
___BB_RR__
___BB_RR__
___BB_____
___BB_____
___BB_____
___BB_____
''', '''
<style>
@page { size: 10px }
svg { display: block }
</style>
<svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
<path d="M 4 3 L 4 10"
stroke="blue" stroke-width="2" fill="none"/>
<path d="M 7 0 l 0 6"
stroke="red" stroke-width="2" fill="none"/>
</svg>
''')
@assert_no_logs
def test_path_Zz():
assert_pixels('path_Zz', 10, 10, '''
BBBBBBB___
BBBBBBB___
BB___BB___
BB___BB___
BBBBBBB___
BBBBBBB___
____RRRRRR
____RRRRRR
____RR__RR
____RRRRRR
''', '''
<style>
@page { size: 10px }
svg { display: block }
</style>
<svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
<path d="M 1 1 H 6 V 5 H 1 Z"
stroke="blue" stroke-width="2" fill="none"/>
<path d="M 9 10 V 7 H 5 V 10 z"
stroke="red" stroke-width="2" fill="none"/>
</svg>
''')
| bsd-3-clause | Python |
|
cc0ef22d0fb122b2c28e6004843978a0ee9e255f | Create Pinject.py | OffensivePython/Pinject,0x0mar/Pinject,xujun10110/Pinject,saydulk/Pinject | Pinject.py | Pinject.py | import socket
import struct
import sys
from optparse import OptionParser
def checksum(data):
    """Compute the 16-bit one's-complement Internet checksum (RFC 1071)
    of `data` (a str of raw bytes).

    Byte pairs are summed as little-endian 16-bit words; a trailing odd
    byte is added on its own.  Returns an int in [0, 0xFFFF].
    """
    s = 0
    n = len(data) % 2
    for i in range(0, len(data) - n, 2):
        s += ord(data[i]) + (ord(data[i + 1]) << 8)
    if n:
        # Fix: the odd trailing byte is the *last* byte of data.  The old
        # code reused the stale loop index (re-adding an already-counted
        # byte, and raising NameError for 1-byte input).
        s += ord(data[-1])
    # Fold the carries back into the low 16 bits.
    while (s >> 16):
        s = (s & 0xFFFF) + (s >> 16)
    s = ~s & 0xffff
    return s
class ip(object):
    """Minimal IPv4 header builder (RFC 791) for raw-socket injection."""

    def __init__(self, source, destination):
        # Fixed header fields; total length and checksum are left at 0
        # for the kernel to fill in on an IPPROTO_RAW socket.
        self.version = 4
        self.ihl = 5  # header length in 32-bit words
        self.tos = 0  # type of service
        self.tl = 0  # total length, filled by kernel
        self.id = 54321
        self.flags = 0
        self.offset = 0
        self.ttl = 255
        self.protocol = socket.IPPROTO_TCP
        self.checksum = 0  # filled by kernel
        self.source = socket.inet_aton(source)
        self.destination = socket.inet_aton(destination)

    def pack(self):
        """Serialize the header into its 20-byte network-order wire form."""
        return struct.pack(
            "!BBHHHBBH4s4s",
            (self.version << 4) + self.ihl,
            self.tos,
            self.tl,
            self.id,
            (self.flags << 13) + self.offset,
            self.ttl,
            self.protocol,
            self.checksum,
            self.source,
            self.destination,
        )
class tcp(object):
    """TCP header builder (SYN by default) for raw-socket injection."""
    def __init__(self, srcp, dstp):
        self.srcp = srcp  # source port
        self.dstp = dstp  # destination port
        self.seqn = 0  # sequence number
        self.ackn = 0  # acknowledgment number
        self.offset = 5  # Data offset: 5x4 = 20 bytes
        self.reserved = 0
        # Control flags -- only SYN is set by default.
        self.urg = 0
        self.ack = 0
        self.psh = 0
        self.rst = 0
        self.syn = 1
        self.fin = 0
        self.window = socket.htons(5840)
        self.checksum = 0  # computed in pack()
        self.urgp = 0  # urgent pointer
        self.payload = ""  # included only in the checksum computation
    def pack(self, source, destination):
        """Return the packed 20-byte TCP header with checksum filled in.

        `source`/`destination` are packed 4-byte addresses (the `ip`
        object's .source/.destination attributes).
        """
        data_offset = (self.offset << 4) + 0
        flags = self.fin + (self.syn << 1) + (self.rst << 2) + (self.psh << 3) + (self.ack << 4) + (self.urg << 5)
        # First pass: header with checksum = 0, used as checksum input.
        tcp_header = struct.pack('!HHLLBBHHH',
            self.srcp,
            self.dstp,
            self.seqn,
            self.ackn,
            data_offset,
            flags,
            self.window,
            self.checksum,
            self.urgp)
        #pseudo header fields
        source_ip = source
        destination_ip = destination
        reserved = 0
        protocol = socket.IPPROTO_TCP
        total_length = len(tcp_header) + len(self.payload)
        # Pseudo header: addresses, protocol and TCP segment length; the
        # checksum covers pseudo header + header + payload.
        psh = struct.pack("!4s4sBBH",
            source_ip,
            destination_ip,
            reserved,
            protocol,
            total_length)
        psh = psh + tcp_header + self.payload
        tcp_checksum = checksum(psh)
        # Second pass: repack without the checksum/urgent-pointer fields...
        tcp_header = struct.pack("!HHLLBBH",
            self.srcp,
            self.dstp,
            self.seqn,
            self.ackn,
            data_offset,
            flags,
            self.window)
        # ...then append the checksum with native byte order ('H', no '!'):
        # the one's-complement sum of network-order bytes is already in
        # on-wire order.  NOTE(review): this is the usual little-endian
        # raw-socket trick -- confirm before running on a big-endian host.
        tcp_header+= struct.pack('H', tcp_checksum) + struct.pack('!H', self.urgp)
        return tcp_header
def main():
    """Build and inject one TCP SYN packet with a test payload through a
    raw socket.  Requires root privileges (IPPROTO_RAW)."""
    parser = OptionParser()
    parser.add_option("-s", "--src", dest="src", type="string",
                      help="Source IP address", metavar="IP")
    parser.add_option("-d", "--dst", dest="dst", type="string",
                      help="Destination IP address", metavar="IP")
    options, args = parser.parse_args()
    # Destination is mandatory; show usage and bail out without it.
    if options.dst == None:
        parser.print_help()
        sys.exit()
    else:
        dst_host = socket.gethostbyname(options.dst)
    if options.src == None:
        # get the current Network Interface
        src_host = socket.gethostbyname(socket.gethostname())
    else:
        src_host = options.src
    print("[+] Local Machine: %s"%src_host)
    print("[+] Remote Machine: %s"%dst_host)
    # IPPROTO_RAW means we supply the IP header ourselves; the kernel
    # fills in total length and IP checksum.
    s = socket.socket(socket.AF_INET,
                      socket.SOCK_RAW,
                      socket.IPPROTO_RAW)
    print("[+] Raw scoket created")
    data = "TEST!!"
    print("[+] Data to inject: %s"%data)
    # IP Header
    print("[+] Constructing IP Header")
    ipobj = ip(src_host, dst_host)
    iph = ipobj.pack()
    # TCP Header -- checksum is computed over the pseudo header built
    # from the packed IP addresses.
    print("[+] Constructing TCP Header")
    tcpobj = tcp(1234, 80)
    tcpobj.payload = data
    tcph = tcpobj.pack(ipobj.source,
                       ipobj.destination) # tcp header
    # Packet Injection
    # NOTE(review): str concatenation of headers and payload implies
    # Python 2; under Python 3 struct.pack returns bytes and this line
    # would raise TypeError -- confirm the target version.
    packet = iph + tcph + data
    s.sendto(packet, (dst_host, 0))
    print("[+] Packet Injected!")
| mit | Python |
|
ec484a404752c60a7c88ae84f79b4792c777dfd4 | Define ESCO ua and eu tender models | Scandie/openprocurement.tender.esco,openprocurement/openprocurement.tender.esco | openprocurement/tender/esco/models.py | openprocurement/tender/esco/models.py | from zope.interface import implementer
from schematics.types import StringType
from openprocurement.api.models import ITender
from openprocurement.tender.openua.models import (
Tender as BaseTenderUA,
)
from openprocurement.tender.openeu.models import (
Tender as BaseTenderEU,
)
@implementer(ITender)
class Tender(BaseTenderUA):
    """ESCO tender running the open-UA procedure."""
    procurementMethodType = StringType(default="esco.UA")
# Re-bind under a distinct name before `Tender` is shadowed by the EU
# variant defined below.
TenderESCOUA = Tender
@implementer(ITender)
class Tender(BaseTenderEU):
    """ESCO tender running the open-EU procedure."""
    procurementMethodType = StringType(default="esco.EU")
TenderESCOEU = Tender
| apache-2.0 | Python |
|
82b9a66ea826b4463d82c69ba1703eab213efe83 | Add test for stack outputs | cwolferh/heat-scratch,noironetworks/heat,jasondunsmore/heat,jasondunsmore/heat,noironetworks/heat,openstack/heat,steveb/heat,dims/heat,steveb/heat,dims/heat,openstack/heat,cwolferh/heat-scratch | heat_integrationtests/functional/test_stack_outputs.py | heat_integrationtests/functional/test_stack_outputs.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class StackOutputsTest(functional_base.FunctionalTestsBase):
    # Two TestResource resources whose `output` attributes are exposed
    # as stack outputs.
    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        """Verify output_list and output_show for both stack outputs."""
        stack_identifier = self.stack_create(template=self.template)
        # output_list returns only keys and descriptions (no values).
        self.assertEqual(
            [{u'output_key': u'resource_output_a',
              u'description': u'Output of resource a'},
             {u'output_key': u'resource_output_b',
              u'description': u'Output of resource b'}],
            self.client.stacks.output_list(stack_identifier)['outputs'])
        # output_show resolves the value of a single named output.
        for key, value, description in (
                ('resource_output_a', u'a', u'Output of resource a'),
                ('resource_output_b', u'b', u'Output of resource b')):
            self.assertEqual(
                {u'output_value': value, u'output_key': key,
                 u'description': description},
                self.client.stacks.output_show(stack_identifier,
                                               key)['output'])
| apache-2.0 | Python |
|
78df4f45ea4b8c04ba8f34d8fc356345998c616b | Add TelnetServer.py under version control. | niucheng/TelnetServer | TelnetServer.py | TelnetServer.py | #!/usr/bin/env python
# coding: utf-8
import socket
import threading
# Banner sent to every client right after it connects.
welcome_slogan = '''Welcome novice!\r\n\
Type something and hit enter to see what happens.\r\n\
Be bold!\r\n\r\n'''
# Reply to the HELP / ? command.
help_message = '''Command Description\r\n\
=============================================================\r\n\
HELP Print this help message\r\n\
TALK 'MESSAGE' Talk to other users in the same telnet system\r\n\
EXIT Quit the telnet service\r\n\r\n\
At your service. 20140819\r\n\r\n'''
# Farewell text sent just before the connection is closed.
goodbye_farewell = '''Have a lot of fun!\r\n'''
# Prompt shown before each command.
PS1 = 'TELNET# '
HOST = ''  # bind on all interfaces
PORT = 56789
# Listening socket consumed by the accept loop at the bottom of the file.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
clients = [] # list of clients connected
lock = threading.Lock()  # guards mutation of `clients`
class telnetServer(threading.Thread):
    """One thread per connected client: echoes input, answers HELP, and
    relays TALK messages to every connected client."""
    def __init__(self, bind):
        threading.Thread.__init__(self)
        # `bind` is the (socket, address) pair returned by accept().
        (self.socket, self.address) = bind
    def run(self):
        # Register this client in the shared list.
        lock.acquire()
        clients.append(self)
        lock.release()
        print ('+ %s:%s connected.' % self.address)
        self.socket.send(welcome_slogan.encode())
        while True:
            self.socket.send(PS1.encode())
            data = self.socket.recv(1024)
            temp = data.decode().strip()
            if not data:
                # Peer closed the connection.
                break
            elif temp.upper() in ['BY', 'BYE', 'QUIT', 'EXIT']:
                break
            elif temp.lower() in ['?', 'help']:
                self.socket.send(help_message.encode())
            elif temp.startswith('#') or temp == '':
                # Comments and empty lines are ignored.
                pass
            elif temp[:5].upper() == 'TALK ':
                # Broadcast the message to every connected client.
                # NOTE(review): `clients` is iterated here without holding
                # `lock` while other threads may mutate it -- confirm.
                print ('%s %s' % (self.address, temp[5:]))
                for c in clients:
                    c.socket.send(('%s %s\r\n' % (self.address, temp[5:])).encode())
            else:
                # Echo anything else back verbatim.
                self.socket.send(data)
        self.socket.send(goodbye_farewell.encode())
        self.socket.close()
        print ('- %s:%s disconnected.' % self.address)
        # Deregister the client.
        lock.acquire()
        clients.remove(self)
        lock.release()
while True: # wait for socket to connect
    # Hand each accepted connection to a fresh telnetServer thread.
    telnetServer(s.accept()).start()
| mit | Python |
|
0d8f3f3d1386236a084bdeb66e27ffa64fcd81a8 | Sort numbers by quick sort | Pyronia/cvut.zal | 06/sort.py | 06/sort.py | # Machiavelli: "Divide and rule", Quick sort
def sortNumbersBeforeRefactoring(weights, condition):
    """Quick sort ``weights`` in 'ASC' or 'DESC' order (legacy version).

    Raises ValueError for any other ``condition``.
    """
    if condition not in ('ASC', 'DESC'):
        raise ValueError('Invalid input data')
    if len(weights) <= 1:
        return weights
    pivot = weights[0]
    smaller = []
    equal = []
    larger = []
    # Three-way partition around the first element.
    for value in weights:
        if value < pivot:
            smaller.append(value)
        elif value == pivot:
            equal.append(value)
        else:
            larger.append(value)
    if condition == 'ASC':
        return (sortNumbersBeforeRefactoring(smaller, 'ASC') + equal +
                sortNumbersBeforeRefactoring(larger, 'ASC'))
    return (sortNumbersBeforeRefactoring(larger, 'DESC') + equal +
            sortNumbersBeforeRefactoring(smaller, 'DESC'))
def sortNumbers(weights, condition):
    """Quick sort ``weights`` in 'ASC' or 'DESC' order.

    Bug fix: the refactored version ignored ``condition`` entirely, so
    'DESC' silently returned ascending order (the pre-refactoring function
    supported both directions). Invalid conditions now raise ValueError,
    matching the legacy behaviour.
    """
    if condition not in ('ASC', 'DESC'):
        raise ValueError('Invalid input data')
    if len(weights) <= 1:
        return weights
    pivot = weights[0]
    left = []
    center = []
    right = []
    # Three-way partition: left < pivot, center == pivot, right > pivot.
    for element in weights:
        if element < pivot:
            left.append(element)
        elif element == pivot:
            center.append(element)
        else:
            right.append(element)
    if condition == 'DESC':
        return sortNumbers(right, condition) + center + sortNumbers(left, condition)
    return sortNumbers(left, condition) + center + sortNumbers(right, condition)
def sortData(weights, data, condition):
    """Sort ``data`` by the parallel ``weights`` list (not implemented yet)."""
    # TODO: implement; presumably this should mirror sortNumbers but reorder
    # ``data`` alongside ``weights`` -- confirm the intended contract.
    pass
def runTests():
    """Minimal self-test: prints a banner if sortNumbers misbehaves, else nothing."""
    result = sortNumbers([4, 2, 3], 'ASC')
    expected = [2, 3, 4]
    if result != expected:
        print('\n*/*/*/*/* UNEXPECTED RESULT */*/*/*/*')
        print('sortNumbers([4, 2, 3], \'ASC\'), expected: ' + str(expected) + ' actual: ' + str(result))
        print('*/*/*/*/* UNEXPECTED RESULT */*/*/*/*\n')
# Run the self-test on import/execution.
runTests()
| apache-2.0 | Python |
|
eaeb02839913136909cccc9a99612a1eb7145b97 | support state hash in ota restore if specified | puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,dimagi/commcare-hq,SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq | corehq/apps/ota/views.py | corehq/apps/ota/views.py | from corehq.apps.users.models import CouchUser
from django_digest.decorators import *
from casexml.apps.phone.restore import generate_restore_response
@httpdigest
def restore(request, domain):
    """
    We override restore because we have to supply our own
    user model (and have the domain in the url)
    """
    user = request.user
    # Restore-id of the last sync ("since" token) and OTA API version.
    restore_id = request.GET.get('since')
    api_version = request.GET.get('version', "1.0")
    # Optional client-side case state hash, used to detect state mismatches.
    state_hash = request.GET.get('state')
    username = user.username
    couch_user = CouchUser.from_django_user(user)
    if not couch_user.is_commcare_user():
        # NOTE(review): HttpResponse appears to come in via the star import
        # above -- confirm it resolves.
        response = HttpResponse("No linked chw found for %s" % username)
        response.status_code = 401 # Authentication Failure
        return response
    return generate_restore_response(couch_user.to_casexml_user(), restore_id,
                                     api_version, state_hash)
| from corehq.apps.users.models import CouchUser
from django_digest.decorators import *
from casexml.apps.phone.restore import generate_restore_payload
@httpdigest
def restore(request, domain):
    """
    We override restore because we have to supply our own
    user model (and have the domain in the url)
    """
    user = request.user
    # Restore-id of the last sync ("since" token) and OTA API version.
    restore_id = request.GET.get('since')
    api_version = request.GET.get('version', "1.0")
    username = user.username
    couch_user = CouchUser.from_django_user(user)
    if not couch_user.is_commcare_user():
        # NOTE(review): HttpResponse appears to come in via the star import
        # above -- confirm it resolves.
        response = HttpResponse("No linked chw found for %s" % username)
        response.status_code = 401 # Authentication Failure
        return response
    response = generate_restore_payload(couch_user.to_casexml_user(), restore_id,
                                        api_version)
return HttpResponse(response, mimetype="text/xml") | bsd-3-clause | Python |
dca0404e6f14194be3a5926e522bbeea375e8456 | add net spider rokic's version | KIDJourney/sbeamhub,KIDJourney/sbeamhub | crawler/discount_info.py | crawler/discount_info.py | import json
import requests
from bs4 import BeautifulSoup
DOMAIN = ""
API = "http://%s/api/" % (DOMAIN)
STEAMDB_SALE_URL = "https://steamdb.info/sales/?merged=true&cc=cn"
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
r = requests.get(STEAMDB_SALE_URL, header=headers)
content = r.content.decode().replace('\n', '')
jar = BeautifulSoup(content, 'lxml').tbody
sweets = ['name', 'discount', 'price', 'rating']
box = []#空箱子
for cookies in jar:#拿出罐子里的曲奇饼
try:
bottle = {'id':cookies['data-appid'], 'type':'game'}#装红酒
except KeyError:
bottle = {'id':cookies['data-subid'], 'type':'package'}#或者装白酒
cast = lambda magic: None if not magic else magic.string if magic.string else cast(magic.findChild())
flour = cookies.findChildren('td')#揉揉面粉
biscuits = [cast(i) for i in flour[2:5] + [flour[6]]]#做点小饼干
bottle.update(zip(sweets, biscuits))#每瓶酒附赠点零食
box.append(bottle) #装箱
request.post(API, json.dumps(box)) | mit | Python |
|
1602513f2ee508ed70ec08af90a94cf150d14189 | Add grep_token_logs.py | google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot | skolo/grep_token_logs.py | skolo/grep_token_logs.py | #!/usr/bin/env python
# Copyright 2018 Google LLC.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Search the syslog on a jumphost to determine when auth tokens changed."""
import sys
SYSLOG = '/var/log/syslog'
# (process-name, pattern) pairs worth surfacing from the syslog.
WHITELIST_LINES = [
    ('metadata-server', 'Updated token: '),
    ('metadata-server', 'Token requested by '),
    ('get-oauth2-token', 'Wrote new auth token: '),
]
def transform_line(line):
  """Return "<timestamp><proc>: <pattern><suffix>" for whitelisted log
  lines, or None when the line matches no whitelisted pattern."""
  for process_name, marker in WHITELIST_LINES:
    if marker not in line:
      continue
    # Log lines look like:
    # Mar 12 09:58:43 jumphost-win-02 metadata-server[5259]: I0312 ... Updated token: [redacted]
    prefix = line.split('jumphost', 1)[0]
    tail = line.split(marker, 1)[1].rstrip()
    return '%s%s: %s%s' % (prefix, process_name, marker, tail)
  return None
def read_syslog():
  """Read the syslog, returning any relevant (whitelisted, trimmed) lines."""
  lines = []
  # NOTE(review): opened in binary mode -- fine under Python 2 (this file
  # uses a py2 ``print`` statement in main), but under Python 3 these would
  # be bytes and transform_line expects str.
  with open(SYSLOG, 'rb') as f:
    for line in f:
      tf = transform_line(line)
      if tf:
        lines.append(tf)
  return lines
def filter_logs(ip, log_lines):
  """Filter *log_lines* down to those relevant to one IP address.

  A line is relevant if it mentions the IP itself, or mentions a token that
  was served to that IP ("Token requested by <ip>, serving <token>" lines).

  Returns the matching subset in original order, without duplicates.
  """
  # First, collect all tokens served to the IP address.
  tokens = []
  for line in log_lines:
    # Bug fix: guard the split -- a line can mention the IP without being a
    # "..., serving <token>" line, and the bare [1] raised IndexError then.
    if ip and ip in line and ', serving ' in line:
      tokens.append(line.split(', serving ', 1)[1])
  # Keep only lines which contain the IP address or one of its tokens.
  filtered = []
  for line in log_lines:
    if ip in line:
      filtered.append(line)
    elif 'Token requested by' not in line:
      # We don't care about other bots which requested the same token.
      # Bug fix: append at most once even when several tokens match, instead
      # of duplicating the line per matching token.
      if any(tok in line for tok in tokens):
        filtered.append(line)
  return filtered
def main():
  """Read the syslog, filter to relevant lines, then print them."""
  lines = read_syslog()
  # Optional CLI argument: an IP address to filter on.
  if len(sys.argv) > 1:
    lines = filter_logs(sys.argv[1], lines)
  for line in lines:
    print line


if __name__ == '__main__':
  main()
| bsd-3-clause | Python |
|
04270ab58f88302f7b0fcd314ae29258c1c9a043 | create mi matrix | googleinterns/e2e-convrec | data/build_probe_data.py | data/build_probe_data.py | import tensorflow.compat.v1 as tf
from tqdm import tqdm
from collections import defaultdict
import sklearn
import numpy as np
import json
# Load train + test movie sequences; each line is an "@"-separated list.
with tf.io.gfile.GFile("gs://e2e_central/data/ml-sequences-train.tsv", 'r') as f:
    sequence_list = list(f)
data = []
for sequence_str in tqdm(sequence_list):
    data.append([x.strip() for x in sequence_str.replace("\n", "").replace('\t', "").split("@") if x.strip() != ""])
with tf.io.gfile.GFile("gs://e2e_central/data/ml-sequences-test.tsv", 'r') as f:
    sequence_list = list(f)
for sequence_str in tqdm(sequence_list):
    data.append([x.strip() for x in sequence_str.replace("\n", "").replace('\t', "").split("@") if x.strip() != ""])
# def write_json(filepath, dictionary):
#     with tf.io.gfile.GFile(filepath, 'w') as f:
#         json.dump(dictionary, filepath)
# def write_json(filepath, dictionary):
#     with tf.io.gfile.GFile(filepath, 'w') as f:
#         json.dump(dictionary, filepath)
# Build the movie vocabulary and per-movie sequence counts.
movie_set = set()
popularity = defaultdict(int)
for seq in data:
    for movie in seq:
        movie_set.add(movie)
        popularity[movie] += 1
# for seq in data:
#     if len(set(seq)) != 10:
#         print(seq)
num_sequences = len(data)
# Movies sorted most-popular-first (popularity counts repeats within a sequence).
popular_movies = list(sorted(movie_set, key=lambda x: popularity[x], reverse=True))
movie_set = sorted(movie_set)
vocab_size = len(movie_set)
# Forward/backward maps between movie title and integer id.
embed = dict(zip(movie_set, list(range(vocab_size))))
unembed = dict(zip(list(range(vocab_size)), movie_set))
movie_ids = {
    "all_movies": movie_set,
    "movie_count": vocab_size,
    "movie_to_id": embed,
    "id_to_movie": unembed,
    "popularity": popularity
}
# Persist the vocabulary/id metadata for downstream probes.
with tf.io.gfile.GFile("gs://e2e_central/data/probes/movie_id_info.json", 'w') as f:
    json.dump(movie_ids, f)
def create_cooccurrence(sequences):
    """Build a (vocab_size, vocab_size) co-occurrence count matrix.

    Entry [i][j] counts how often movies i and j appear in the same
    sequence; the diagonal therefore counts self-pairs as well.
    Relies on the module-level ``embed`` and ``vocab_size``.
    """
    co_matrix = np.zeros((vocab_size, vocab_size))
    print("building cooccurrence matrix")
    for seq in tqdm(sequences):
        for movie1 in seq:
            for movie2 in seq:
                co_matrix[embed[movie1]][embed[movie2]] += 1
    return co_matrix
def get_mutual_info(co_matrix):
    """Return the pointwise mutual information matrix for co-occurrence.

    PMI[i, j] = log( p(i, j) / (p(i) * p(j)) ), with probabilities estimated
    as counts / num_sequences. Relies on the module-level ``popularity``,
    ``unembed``, ``vocab_size`` and ``num_sequences``.
    """
    popularities = np.array([popularity[unembed[x]] for x in range(vocab_size)])
    pxy = co_matrix / num_sequences
    px = popularities / num_sequences
    # Bug fix: the old ``px @ py`` multiplied two 1-D vectors into a single
    # scalar dot product (``.T`` is a no-op on 1-D arrays); the PMI
    # denominator needs the outer product p(x)p(y).
    mutual_info = np.log(pxy / np.outer(px, px))
    return mutual_info
# Compute and persist both matrices for later probing experiments.
co_matrix = create_cooccurrence(data)
mi = get_mutual_info(co_matrix)
with tf.io.gfile.GFile("gs://e2e_central/data/probes/co_matrix.npy", 'w') as f:
    np.save(f, co_matrix)
with tf.io.gfile.GFile("gs://e2e_central/data/probes/mi_matrix.npy", 'w') as f:
    np.save(f, mi)
def get_related_movies(movie="The Lord of the Rings: The Return of the King (2003)"):
    """Return all movies sorted by co-occurrence with *movie*, strongest first."""
    movie_number = embed[movie]
    row = co_matrix[movie_number, :]
    return [unembed[x] for x in np.argsort(row)][::-1]
# Quick sanity prints: top related movies and popularity extremes.
print(get_related_movies()[:10])
print("popular: ", popular_movies[:10])
print("unpopular: ", popular_movies[-10:])
def display_10(matrix):
    """Print the 10x10 sub-matrix for the ten most popular movies."""
    pop = popular_movies[:10]
    pop_ids = [embed[x] for x in pop]
    print(pop)
    print(matrix[pop_ids, :][:, pop_ids])
display_10(co_matrix)
display_10(mi)
| apache-2.0 | Python |
|
417ff63118c967205ee630c5183b19a949a6c157 | Add migrations for indicadores. | erikiado/jp2_online,erikiado/jp2_online,erikiado/jp2_online | indicadores/migrations/0002_auto_20170224_1535.py | indicadores/migrations/0002_auto_20170224_1535.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-24 15:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Ingreso.fecha from DateTimeField to a plain DateField."""
    dependencies = [
        ('indicadores', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ingreso',
            name='fecha',
            field=models.DateField(),
        ),
    ]
| mit | Python |
|
b7bf4586fea207453225a87fb85df59ccfc94e80 | Add missing migration related to django-simple-history update | datasciencebr/jarbas,datasciencebr/jarbas,marcusrehm/serenata-de-amor,marcusrehm/serenata-de-amor,datasciencebr/jarbas,datasciencebr/serenata-de-amor,datasciencebr/serenata-de-amor,marcusrehm/serenata-de-amor,datasciencebr/jarbas,marcusrehm/serenata-de-amor | jarbas/core/migrations/0032_auto_20170613_0641.py | jarbas/core/migrations/0032_auto_20170613_0641.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated after a django-simple-history upgrade: refresh the
    history_type choices on the historical reimbursement table."""
    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]
    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
| mit | Python |
|
60f051590a61ec4435f9bc5d46e430c5feb36f16 | Add agent | eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui | agent/agent.py | agent/agent.py | #!/usr/bin/env python
#http://www.acmesystems.it/python_httpd
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json, subprocess, os, time
# Canned JSON payloads returned by the HTTP handlers.
HELLO_MESSAGE = {'message':'hello, please use JSON via POST!'}
ERROR_JSON_MESSAGE = {'message':'POST content type must be application/json!'}
ERROR_BADJSON_MESSAGE = {'message':'POST content must be valid json!'}
ERROR_BADPASS_MESSAGE = {'message':'Wrong secret key!'}
ERROR_CMDERROR_MESSAGE = {'message':'Bad command!'}
# Actions accepted by execute_POST.
ACTIONS = {'run-test', 'self-test'}
# Per-job output root, resolved relative to the working directory.
TMP = os.path.abspath(r'./tmp')
#TEST_DRIVER = os.path.abspath(r'../chrome-webpage-profiler/test_driver.py')
TEST_DRIVER = os.path.abspath(r'/bin/cat')
# NOTE: the key is to avoid unintentional access, not to secure the agent
SECRET_KEY = '1a2b'
def _ensure_dir(path):
    """Create *path* if missing.

    Returns None on success, or a JSON error-response string on failure.
    """
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except Exception:
            # Bug fix: the old code did ``msg = 'Error...: %s', path`` which
            # built a (format, path) tuple instead of a formatted string.
            msg = 'Error making output directory: %s' % path
            return json.dumps({'message': msg})
    return None


def run_test(body):
    """Run the configured test driver against the posted 'tests-config'.

    Writes the config into a per-job directory under TMP, runs TEST_DRIVER
    on it, and returns a JSON string with the outcome and result files.
    """
    if not body.get('tests-config'):
        return json.dumps({'message': ERROR_CMDERROR_MESSAGE})
    error = _ensure_dir(TMP)
    if error:
        return error
    if not os.path.isfile(TEST_DRIVER):
        msg = 'No test driver found at %s' % TEST_DRIVER
        return json.dumps({'message': msg})
    # Job ids are millisecond timestamps; the last 5 digits bucket jobs into
    # index subdirectories so no single directory grows too large.
    jobId = "%d" % (time.time() * 1000)
    jobIdIndex = jobId[-5:]
    jobIdIndexPath = os.path.join(TMP, jobIdIndex)
    jobIdPath = os.path.join(jobIdIndexPath, jobId)
    testConfig = os.path.join(jobIdPath, 'tests.json')
    for directory in (jobIdIndexPath, jobIdPath):
        error = _ensure_dir(directory)
        if error:
            return error
    tests = body['tests-config']
    with open(testConfig, 'w') as outfile:
        json.dump(tests, outfile)
    p = subprocess.Popen([TEST_DRIVER, testConfig], cwd=jobIdPath)
    rc = p.wait()
    if rc == 0:
        response = {'message': 'OK. Done', 'job-id': jobId}
        response['files'] = []
        for f in os.listdir(jobIdPath):
            response['files'].append(os.path.join('/tmp/', jobIdIndex, jobId, f))
        return json.dumps(response)
    else:
        return json.dumps({'message': 'FAIL. return code%d' % rc})
def self_test():
    """Return a JSON string with basic host diagnostics (disk usage)."""
    response = {'message': 'self test done', 'results': {} }
    # shell=True is safe here: the command string is a fixed literal.
    rc = subprocess.check_output('df -h; exit 0', stderr=subprocess.STDOUT, shell=True)
    # NOTE(review): under Python 3 check_output returns bytes, which
    # json.dumps would reject; fine under Python 2 (this file is py2).
    response['results']['df'] = rc
    return json.dumps(response)
def execute_POST(body):
    """Parse a JSON request body, check the secret key, and dispatch the action.

    Always returns a JSON string (an error envelope or the handler's result).
    """
    try:
        parsed = json.loads(body)
    except ValueError:
        return json.dumps(ERROR_BADJSON_MESSAGE)
    if parsed.get('key') != SECRET_KEY:
        return json.dumps(ERROR_BADPASS_MESSAGE)
    action = parsed.get('action')
    if action not in ACTIONS:
        return json.dumps(ERROR_CMDERROR_MESSAGE)
    if action == 'run-test':
        return run_test(parsed)
    elif action == 'self-test':
        return self_test()
class S(BaseHTTPRequestHandler):
    """Minimal JSON-over-HTTP agent handler (Python 2 BaseHTTPServer API)."""
    def _set_headers(self):
        # NOTE(review): a 200 status is emitted before the request is
        # validated, so even error payloads go out as HTTP 200.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
    def do_GET(self):
        """Reply to any GET with the hello/usage message."""
        self._set_headers()
        body = json.dumps(HELLO_MESSAGE)
        self.wfile.write(body)
    def do_HEAD(self):
        self._set_headers()
    def do_POST(self):
        """Require application/json, then dispatch the body to execute_POST."""
        self._set_headers()
        content_len = int(self.headers.getheader('content-length', 0))
        content_type = self.headers.getheader('content-type', 0)
        if content_type.lower() != 'application/json':
            response_body = json.dumps(ERROR_JSON_MESSAGE)
        else:
            post_body = self.rfile.read(content_len)
            response_body = execute_POST(post_body)
        self.wfile.write(response_body)
def run(server_class=HTTPServer, handler_class=S, port=80):
    """Start the agent HTTP server on *port* (all interfaces) and serve forever."""
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print 'Starting httpd...'
    httpd.serve_forever()
if __name__ == "__main__":
    from sys import argv
    # A single optional CLI argument overrides the default port 80.
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
| apache-2.0 | Python |
|
7082f80d5be56073d9d2a66653188b2cee248a8e | add basic tests of search and matrix views | hms-dbmi/fourfront,hms-dbmi/fourfront,ENCODE-DCC/snovault,ENCODE-DCC/snovault,ENCODE-DCC/snovault,ENCODE-DCC/encoded,T2DREAM/t2dream-portal,hms-dbmi/fourfront,4dn-dcic/fourfront,ENCODE-DCC/encoded,ENCODE-DCC/snovault,ENCODE-DCC/snovault,4dn-dcic/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,ENCODE-DCC/encoded,T2DREAM/t2dream-portal,hms-dbmi/fourfront,4dn-dcic/fourfront,T2DREAM/t2dream-portal,T2DREAM/t2dream-portal,ENCODE-DCC/encoded | src/encoded/tests/test_search.py | src/encoded/tests/test_search.py | # Use workbook fixture from BDD tests (including elasticsearch)
from .features.conftest import app_settings, app, workbook
def test_search_view(workbook, testapp):
    """Smoke-test /search/: response envelope fields and a non-empty result set."""
    res = testapp.get('/search/').json
    assert res['@type'] == ['Search']
    assert res['@id'] == '/search/'
    assert res['@context'] == '/terms/'
    assert res['notification'] == 'Success'
    assert res['title'] == 'Search'
    assert res['total'] > 0
    assert 'facets' in res
    assert 'filters' in res
    assert 'columns' in res
    assert '@graph' in res
def test_matrix_view(workbook, testapp):
    """Smoke-test /experiments/matrix: envelope plus the x/y matrix structure."""
    res = testapp.get('/experiments/matrix').json
    assert res['@type'] == ['Matrix']
    assert res['@id'] == '/experiments/matrix'
    assert res['@context'] == '/terms/'
    assert res['notification'] == 'Success'
    assert res['title'] == 'Experiment Matrix'
    assert res['total'] > 0
    assert 'facets' in res
    assert 'filters' in res
    assert 'matrix' in res
    assert res['matrix']['max_cell_doc_count'] > 0
    assert res['matrix']['search_base'] == '/search/?type=experiment'
    # x axis groups by assay, y axis by biosample type then term name.
    assert res['matrix']['x']['group_by'] == 'assay_term_name'
    assert res['matrix']['x']['label'] == 'Assay'
    assert res['matrix']['x']['limit'] == 20
    assert len(res['matrix']['x']['buckets']) > 0
    assert len(res['matrix']['x']['facets']) > 0
    assert res['matrix']['y']['group_by'] == ['replicates.library.biosample.biosample_type', 'biosample_term_name']
    assert res['matrix']['y']['label'] == 'Biosample'
    assert res['matrix']['y']['limit'] == 5
    assert len(res['matrix']['y']['replicates.library.biosample.biosample_type']['buckets']) > 0
    assert len(res['matrix']['y']['replicates.library.biosample.biosample_type']['buckets'][0]['biosample_term_name']['buckets']) > 0
| mit | Python |
|
2cd1da31b099cbf37552b2a049c3df6619e0e64f | Add helper enums for type encodings | gamenet/redis-memory-analyzer | rma/redis_types.py | rma/redis_types.py | REDIS_ENCODING_ID_RAW = 0
# Numeric identifiers for Redis internal value encodings.
# (REDIS_ENCODING_ID_RAW = 0 is defined above; restated members follow.)
REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8
# Byte-string encoding name (as reported by OBJECT ENCODING) -> numeric id.
REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}
# Reverse mapping: numeric id -> byte-string encoding name.
REDIS_ENCODING_ID_TO_STR_LIB = {
    identifier: name for name, identifier in REDIS_ENCODING_STR_TO_ID_LIB.items()
}
def redis_encoding_str_to_id(key_encoding):
    """Map a byte-string encoding name to its numeric id, or raise ValueError."""
    if key_encoding not in REDIS_ENCODING_STR_TO_ID_LIB:
        raise ValueError("Invalid encoding `%s` given" % key_encoding)
    return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
def redis_encoding_id_to_str(key_encoding):
    """Map a numeric encoding id to its text name, or raise ValueError."""
    if key_encoding not in REDIS_ENCODING_ID_TO_STR_LIB:
        raise ValueError("Invalid encoding `%s` given" % key_encoding)
    return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
| mit | Python |
|
6e577ecf55c107254816055ea810183b66e734b6 | Add management command to tag older icds sms with indicator metadata | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/icds/management/commands/tag_icds_sms.py | custom/icds/management/commands/tag_icds_sms.py | from corehq.apps.sms.models import SMS
from corehq.messaging.smsbackends.icds_nic.models import SQLICDSBackend
from datetime import datetime
from django.core.management.base import BaseCommand
SUBSTRINGS = {
'hin': {
'aww_1': u'\u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 \u0915\u0947\u0902\u0926\u094d\u0930 \u0926\u094d\u0935\u093e\u0930\u093e \u090f\u0915',
'aww_2': u'\u091f\u0940.\u090f\u091a . \u0930. \u0935\u093f\u0924\u0930\u0923 :',
'ls_1': u'\u091f\u0940.\u090f\u091a.\u0930.\u0935\u093f\u0924\u0930\u0923 :',
'ls_2': u'\u0928\u093f\u092e\u094d\u0932\u093f\u0916\u093f\u0924 \u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 ',
'ls_6': u'\u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 \u0915\u0947\u0902\u0926\u094d\u0930\u094b\u0902 \u0926\u094d\u0935\u093e\u0930\u093e',
},
'tel': {
'aww_1': u'\u0c05\u0c02\u0c17\u0c28\u0c4d \u0c35\u0c3e\u0c21\u0c40 \u0c15\u0c47\u0c02\u0c26\u0c4d\u0c30\u0c02 ICDS',
'aww_2': u'\u0c17\u0c43\u0c39 \u0c38\u0c02\u0c26\u0c30\u0c4d\u0c36\u0c28\u0c32\u0c41:',
'ls_1': u'\u0c17\u0c43\u0c39 \u0c38\u0c02\u0c26\u0c30\u0c4d\u0c36\u0c28\u0c32\u0c41 ',
'ls_2': u'\u0c17\u0c24 \u0c28\u0c46\u0c32 \u0c30\u0c4b\u0c1c\u0c41\u0c32\u0c4d\u0c32\u0c4b',
'ls_6': u'\u0c35\u0c3e\u0c30\u0c3f\u0c15\u0c3f \u0c24\u0c17\u0c3f\u0c28 \u0c38\u0c39\u0c3e\u0c2f\u0c02',
},
}
class Command(BaseCommand):
    help = ""
    def add_arguments(self, parser):
        parser.add_argument('domain')
    def get_indicator_slug(self, sms):
        """Return (slug, match_count) for SUBSTRINGS patterns found in sms.text.

        If several patterns match, ``slug`` is the last one encountered and
        the count tells the caller the match was ambiguous.
        """
        last_match = None
        num_matches = 0
        for lang_code, data in SUBSTRINGS.items():
            for slug, substring in data.items():
                if substring in sms.text:
                    last_match = slug
                    num_matches += 1
        return last_match, num_matches
    def handle(self, domain, **options):
        """Tag outgoing ICDS backend SMS (pre 2017-06-26) with indicator metadata."""
        for sms in SMS.objects.filter(
            domain=domain,
            backend_api=SQLICDSBackend.get_api_id(),
            direction='O',
            processed=True,
            date__lt=datetime(2017, 6, 26),
        ):
            # Skip messages that already carry metadata.
            if sms.custom_metadata:
                continue
            slug, num_matches = self.get_indicator_slug(sms)
            # Only tag unambiguous (single-pattern) matches.
            if num_matches == 1:
                sms.custom_metadata = {'icds_indicator': slug}
                sms.save()
| bsd-3-clause | Python |
|
ea522fc3cdcec3d7e774cdaa93a36ef22c221432 | Add file for parsing eyelink data | mwaskom/moss,mwaskom/moss | moss/eyelink.py | moss/eyelink.py | import os
import subprocess
import tempfile
import shutil
import numpy as np
import pandas as pd
class EyeData(object):
    """Parse EyeLink eye-tracking data from an EDF or ASCII file.

    Exposes ``eye_data`` (samples), ``fixations``, ``saccades`` and
    ``blinks`` as numeric pandas DataFrames, plus ``messages`` (a Series
    indexed by timestamp) and recording ``settings``.
    """
    def __init__(self, edf_file=None, asc_file=None):
        if edf_file is None and asc_file is None:
            raise ValueError("Must pass either EDF or ASCII file")
        # Recording settings captured from the ASCII header lines.
        self.settings = dict(PRESCALER=None,
                             VPRESCALER=None,
                             PUPIL=None,
                             EVENTS=None,
                             SAMPLES=None)
        self.messages = pd.Series(index=pd.Int64Index([], name="timestamp"))
        # Accumulators filled by parse_line; converted to DataFrames below.
        self.eye_data = []
        self.fixations = []
        self.saccades = []
        self.blinks = []
        # Obtain eye data in ASCII format
        if asc_file is None:
            temp_dir = tempfile.mkdtemp()
            asc_file = self.edf_to_asc(edf_file, temp_dir)
        else:
            temp_dir = None
        # Process the eye data file
        self.parse_asc_file(asc_file)
        # Convert to better representations of the data
        # ("." marks missing values in EyeLink output -> NaN).
        eye_data = pd.DataFrame(self.eye_data,
                                columns=["timestamp", "x", "y", "pupil"])
        self.eye_data = (eye_data.replace({".": np.nan})
                                 .apply(pd.to_numeric)
                                 .set_index("timestamp"))
        fix_columns = ["start", "end", "duration", "x", "y", "pupil"]
        fixations = pd.DataFrame(self.fixations, columns=fix_columns)
        self.fixations = fixations.replace({".": np.nan}).apply(pd.to_numeric)
        sacc_columns = ["start", "end", "duration",
                        "start_x", "start_y", "end_x", "end_y",
                        "amplitude", "peak_velocity"]
        saccades = pd.DataFrame(self.saccades, columns=sacc_columns)
        self.saccades = saccades.replace({".": np.nan}).apply(pd.to_numeric)
        blink_columns = ["start", "end", "duration"]
        blinks = pd.DataFrame(self.blinks, columns=blink_columns)
        self.blinks = blinks.replace({".": np.nan}).apply(pd.to_numeric)
        # Clean up
        if temp_dir is not None:
            shutil.rmtree(temp_dir)
    def edf_to_asc(self, edf_file, temp_dir):
        """Convert an EDF file to ASCII with the edf2asc tool; return the path."""
        subprocess.call(["edf2asc",
                         "-p", temp_dir,
                         edf_file])
        self._temp_dir = temp_dir
        # edf2asc writes <basename>.asc next to the "-p" output directory.
        edf_basename = os.path.basename(edf_file)
        asc_basename = edf_basename[:-3] + "asc"
        asc_file = os.path.join(temp_dir, asc_basename)
        return asc_file
    def parse_asc_file(self, asc_file):
        """Feed every line of the ASCII file through parse_line."""
        with open(asc_file) as fid:
            for line in fid:
                self.parse_line(line)
    def parse_line(self, line):
        """Route one ASCII line into settings, messages, events, or samples."""
        # Skip blank-leading and comment ("*") lines.
        if not line[0].strip():
            return
        if line.startswith("*"):
            return
        fields = line.split()
        if fields[0] in self.settings:
            self.settings[fields[0]] = " ".join(fields[1:])
        if fields[0] == "MSG":
            timestamp = int(fields[1])
            self.messages.loc[timestamp] = " ".join(fields[2:])
        # Start-of-event markers carry no data we need; end markers do.
        if fields[0] in ["SFIX", "SSACC", "SBLINK"]:
            return
        # Note that we are not reading the eye field for events, assuming
        # that we are in monocular mode (as we always should be).
        # This makes it simpler to convert data to numeric after parsing.
        if fields[0] in ["EFIX"]:
            self.fixations.append(fields[2:])
        if fields[0] in ["ESACC"]:
            self.saccades.append(fields[2:])
        if fields[0] in ["EBLINK"]:
            self.blinks.append(fields[2:])
        # Sample lines start with a numeric timestamp; anything else falls
        # out here via the ValueError.
        try:
            timestamp = int(fields[0])
        except ValueError:
            return
        self.eye_data.append(fields[:4])
| bsd-3-clause | Python |
|
901046879338b1bc19de59675c7eb513bbc2c517 | add problem 19 | branning/euler,branning/euler | euler019.py | euler019.py | #!/usr/bin/env python
# Project Euler 19: count Sundays falling on the first of a month, 1901-2000.
# Day numbers start at 1 = Tue 1 Jan 1901, so Sunday <=> day % 7 == 6.
firsts = [1]
jan = 31
# Month lengths for March..December (February handled per-year below).
mar_dec = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year in range(1901,2001):
    firsts.append(firsts[-1] + jan)
    # Gregorian leap-year rule.
    if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
        feb = 29
    else:
        feb = 28
    firsts.append(firsts[-1] + feb)
    for mon in mar_dec:
        firsts.append(firsts[-1] + mon)
print sum([1 for i in firsts if i%7==6])
| mit | Python |
|
dfa5bee0720f8d4b5f3ac2309915090239780045 | Test Flask file | btlee215/bme590hrm | flaskweb.py | flaskweb.py | from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route("/hello/<name>")
def hello(name):
    """Greet the caller with the name captured from the URL."""
    return "Hello World! %s" % name
@app.route("/data/")
def temptime():
    """Return a fixed temperature/time sample payload as JSON."""
    arr = {"temp": [20, 21, 21],"time":[10,20,30],"unit":"s"}
    return jsonify(arr)
@app.route("/add", methods = ['POST'])
def sum():
    """Add two numbers posted as JSON {"a": int, "b": int}; return the sum as text.

    NOTE: the view name shadows the builtin ``sum``; it is kept unchanged so
    Flask's endpoint name (used by url_for) stays stable.
    """
    payload = request.get_json()
    # Bug fix (idiom): the local result was also named ``sum``, shadowing the
    # builtin inside the view; renamed to ``total``.
    total = payload['a'] + payload['b']
    return '{:d}'.format(total)
| mit | Python |
|
02f84b8cf3c3dd77b6d84d9ccea979c8de23eaa5 | Add Awesome renderers | elastic7327/django-tdd-restful-api,elastic7327/django-tdd-restful-api | src/common/renderers.py | src/common/renderers.py | import time
from rest_framework.renderers import JSONRenderer
from django.shortcuts import resolve_url
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.functional import Promise
from rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer
from rest_framework.utils import encoders, json
# from drf_yasg.app_settings import redoc_settings, swagger_settings
# from drf_yasg.codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml
# from drf_yasg.openapi import Swagger
# from drf_yasg.utils import filter_none
class AwesomeJSONRenderer(JSONRenderer):
    """JSON renderer that wraps every DRF response in a common envelope:

        {"message_code": ..., "message": ..., "data": ...}

    Dead commented-out variants of the envelope have been removed; the three
    branches below preserve the original behaviour exactly.
    """

    def render(self, data, accepted_media_type=None, renderer_context=None):
        status_code = renderer_context['response'].status_code
        if 'detail' in data:
            # A raised exception, e.g.
            # {'detail': ErrorDetail(string='...', code='00002')}:
            # expose its text and numeric error code, with no payload.
            response = {
                'message_code': int(data['detail'].code),
                'message': str(data['detail']),
                'data': None,
            }
        elif ('detail' not in data) and (status_code in [200, 201, 202]):
            # Success: fixed code 100 with the serialized payload.
            response = {
                'message_code': 100,
                'message': 'success',
                'data': data,
            }
        else:
            # Any other error (e.g. a plain 400 validation dict): echo the
            # status code and the raw error body in the message field.
            response = {
                'message_code': status_code,
                'message': data,
                'data': None,
            }
        return super(AwesomeJSONRenderer, self).render(response, accepted_media_type, renderer_context)
| mit | Python |
|
80a435e3e382791b5615755d05c5353114650ecc | test only | yaqintang/yaqintang.github.io | hello.py | hello.py | #!/usr/bin/python
# Minimal CGI response (Python 2): HTTP header, blank line, then the page body.
print "Content-type:text/html\r\n\r\n"
print '<html>'
print '<head>'
print '<title>Hello Word - First CGI Program</title>'
print '</head>'
print '<body>'
print '<h2>Hello Word! This is my first CGI program</h2>'
print '</body>'
print '</html>'
| mit | Python |
|
101f378fb536cdaf8f2c681f5b1fba669bf70631 | Add hex xor | lttviet/randomPy | hexor.py | hexor.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# xor 2 hex strings
import string
def isHex(s):
    """Return True iff *s* is a non-empty, even-length string of hex digits."""
    has_even_length = len(s) > 0 and len(s) % 2 == 0
    return has_even_length and all(c in string.hexdigits for c in s)
def hexor(s1, s2):
    """XOR two hex strings digit-by-digit, returning a lowercase hex string.

    The result is truncated to the length of the shorter input.
    """
    pairs = zip(s1, s2)
    return "".join("{:x}".format(int(a, 16) ^ int(b, 16)) for a, b in pairs)
if __name__ == "__main__":
    # Interactive loop: read two hex strings and print their XOR.
    # Runs until interrupted (Ctrl-C / EOF) -- there is no quit command.
    while True:
        s1 = input("First string: ")
        s2 = input("Second string: ")
        if not isHex(s1) or not isHex(s2):
            print("Your hex string(s) are invalid!")
            continue
        else:
            print("Result: ", hexor(s1,s2))
| mit | Python |
|
6a9d60a6e48b3231675e465c1a837c909a9e652a | Add forward2 | LemonAniLabs/tensorflow-resnet | forward2.py | forward2.py | from convert import print_prob, load_image, checkpoint_fn, meta_fn
import tensorflow as tf
import resnet
import os
# ResNet depth selector: 50, 101 or 152 (Python 2 / TensorFlow 1.x script).
layers = 50
img = load_image("data/cat.jpg")
sess = tf.Session()
filename = checkpoint_fn(layers)
filename = os.path.realpath(filename)
# Residual block counts per stage for each supported depth.
if layers == 50:
    num_blocks = [3, 4, 6, 3]
elif layers == 101:
    num_blocks = [3, 4, 23, 3]
elif layers == 152:
    num_blocks = [3, 8, 36, 3]
# Build the inference graph on CPU; input is NHWC 224x224 RGB.
with tf.device('/cpu:0'):
    images = tf.placeholder("float32", [None, 224, 224, 3], name="images")
    logits = resnet.inference(images,
                              is_training=False,
                              num_blocks=num_blocks,
                              preprocess=True,
                              bottleneck=True)
    prob = tf.nn.softmax(logits, name='prob')
# Restore pretrained weights from the converted checkpoint.
saver = tf.train.Saver()
saver.restore(sess, filename)
graph = tf.get_default_graph()
prob_tensor = graph.get_tensor_by_name("prob:0")
# Debug aid: list every operation in the restored graph.
for op in graph.get_operations():
    print op.name
print "graph restored"
# Single-image batch, then run the forward pass and print top classes.
batch = img.reshape((1, 224, 224, 3))
feed_dict = {images: batch}
prob = sess.run(prob_tensor, feed_dict=feed_dict)
print_prob(prob[0])
| mit | Python |
|
c794fbf00c5ba5b661f01fcbd0652105ed4c3904 | Add missing migration. | praekelt/mc2,praekelt/mc2,praekelt/mc2,praekelt/mc2,praekelt/mc2 | mc2/controllers/base/migrations/0005_field_defaults.py | mc2/controllers/base/migrations/0005_field_defaults.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make EnvVariable.key and MarathonLabel.name blankable
    TextFields with an empty-string default."""
    dependencies = [
        ('base', '0004_marathonlabel'),
    ]
    operations = [
        migrations.AlterField(
            model_name='envvariable',
            name='key',
            field=models.TextField(default='', blank=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='marathonlabel',
            name='name',
            field=models.TextField(default='', blank=True),
            preserve_default=False,
        ),
    ]
| bsd-2-clause | Python |
|
46a9c3789b86631258d881dacf6ae529ec277d70 | Add stats262.py | lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public | ielex/lexicon/management/commands/stats262.py | ielex/lexicon/management/commands/stats262.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import BaseCommand
from ielex.lexicon.models import Language, \
Meaning, \
Lexeme, \
CognateJudgementCitation
class Command(BaseCommand):
    help = "Computes statistics for https://github.com/lingdb/CoBL/issues/262"\
           "\nPossible parameters are: {1, 2, 3} for task number."
    def add_arguments(self, parser):
        parser.add_argument('task', type=int)
    missing_args_message = "Please provide a task number of {1,2,3}."
    def handle(self, *args, **options):
        """List CognateJudgementCitations with the reliability code of the task."""
        # Data to work with:
        languageIds = Language.objects.filter(
            languagelist__name='Current').values_list('id', flat=True)
        meaningIds = Meaning.objects.filter(
            meaninglist__name='Jena200').values_list('id', flat=True)
        lexemeIds = Lexeme.objects.filter(
            language_id__in=languageIds,
            meaning_id__in=meaningIds,
            not_swadesh_term=False).values_list('id', flat=True)
        self.stdout.write("Task %s:" % options['task'])
        # Task number -> citation reliability code.
        taskFilter = {1: 'C',  # Doubtful
                      2: 'L',  # Loanword
                      3: 'X'}  # Exclude
        cjcs = CognateJudgementCitation.objects.filter(
            cognate_judgement__lexeme_id__in=lexemeIds,
            reliability=taskFilter[options['task']]).all()
        for cjc in cjcs:
            cj = cjc.cognate_judgement
            self.stdout.write("CognateJudgementCitation %s "
                              "of CognateClass %s "
                              "and Lexeme %s." % (cjc.id,
                                                  cj.cognate_class.id,
                                                  cj.lexeme.id))
| bsd-2-clause | Python |
|
6f27af536f9421c2b73def505648a039d4f0ad1f | Manage Rackes Code | najla88/SaedRobot | ManageRacks.py | ManageRacks.py | import sqlite3
import gi
import json
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
con = sqlite3.connect('SaedRobot.db')
cur = con.cursor()
cur.execute("SELECT VOLSER from inventory")
software_list = cur.fetchall()
class ManageRack(Gtk.Window):
    """Main window listing inventory VOLSER values with Add/Del/Back buttons.

    The rows come from the module-level `software_list`, fetched from the
    sqlite inventory table at import time.
    """
    # Class-level placeholders; replaced per instance in __init__.
    builder =None
    window= None
    box = None
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file("A.glade")
        self.window = self.builder.get_object("window1")
        grid=self.builder.get_object("grid3")
        AddBtn=self.builder.get_object("AddBtn")
        DelBtn=self.builder.get_object("DelBtn")
        backBtn=self.builder.get_object("backBtn")
        AddBtn.connect("clicked",self.Add)
        DelBtn.connect("clicked",self.Del)
        backBtn.connect("clicked",self.back)
        #Creating the ListStore model
        self.software_liststore = Gtk.ListStore(str)
        # software_list holds (VOLSER,) tuples from the inventory query.
        for software_ref in software_list:
            self.software_liststore.append(list(software_ref))
        self.current_filter_language = None
        #Creating the filter, feeding it with the liststore model
        self.language_filter = self.software_liststore.filter_new()
        #creating the treeview, making it use the filter as a model, and adding the columns
        self.treeview = Gtk.TreeView.new_with_model(self.language_filter)
        for i, column_title in enumerate(["Rack Name"]):
            renderer = Gtk.CellRendererText()
            column = Gtk.TreeViewColumn(column_title, renderer, text=i)
            self.treeview.append_column(column)
        #setting up the layout, putting the treeview in a scrollwindow
        self.scrollable_treelist = Gtk.ScrolledWindow()
        self.scrollable_treelist.set_vexpand(True)
        self.scrollable_treelist.set_hexpand(True)
        grid.attach(self.scrollable_treelist, 0, 0, 1, 1)
        self.scrollable_treelist.add(self.treeview)
        self.window.show_all()
    def Add(self,button):
        """Close this window and open the AddRack dialog."""
        self.window.destroy()
        self.window=AddRack()
    def Del(self,button):
        """Close this window; the deletion flow is not implemented yet."""
        self.window.destroy()
        #self.window=login()
    def back(self,button):
        """Close this window; the navigation target is not implemented yet."""
        self.window.destroy()
        #self.window=login()
class AddRack():
    """Secondary window ("window2" in A.glade) for adding a rack entry."""
    # Class-level placeholders; replaced per instance in __init__.
    builder =None
    window = None
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file("A.glade")
        self.window = self.builder.get_object("window2")
        AddBtn=self.builder.get_object("AddBtn")
        backBtn=self.builder.get_object("backBtn")
        AddBtn.connect("clicked",self.Add)
        backBtn.connect("clicked",self.back)
        self.window.show()
    def back(self,button):
        """Close this dialog and return to the rack list."""
        self.window.destroy()
        self.window=ManageRack()
    def Add(self,button):
        """Close this dialog; persisting the new rack is not implemented yet."""
        self.window.destroy()
        #self.window=ManageRack()
window=ManageRack()
Gtk.main()
| mit | Python |
|
a7728b466f5cacb662566e9e71ebc661ae40271a | Create max_end3.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/max_end3.py | Python/CodingBat/max_end3.py | # http://codingbat.com/prob/p135290
def max_end3(nums):
max = nums[0] if (nums[0] > nums[-1]) else nums[-1] # or use max(arg1, arg2)
for i in range(3):
nums[i] = max
return nums
| mit | Python |
|
6ae82ecdd749b936289b496a10faa2caf1aa94c6 | Add first version of the code | derherrg/pybibsort | bibsort.py | bibsort.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from collections import OrderedDict
import codecs
class BibEntry:
    """One bibtex entry: a field dict that must contain 'type' and 'key'.

    NOTE: Python 2 code (uses dict.iteritems()).
    """
    def __init__(self, **kwargs):
        # Copy all keyword arguments into self.data verbatim.
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value
    def entry(self):
        """Render the entry as a bibtex string with fields sorted by name."""
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            # 'type' and 'key' already appear on the header line above.
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        # Drop the trailing ",\n" and close the entry with "}".
        result = result[:-2] + u'\n}\n'
        return result
def must_omit(i):
    """Return True for bibtex chunks that should be skipped (comment blocks
    and '%%' marker lines).

    The original used re.match() for plain anchored literals; str.startswith
    is the idiomatic equivalent and returns a clean bool (callers only test
    truthiness, so this is backward-compatible).
    """
    return i.startswith("comment") or i.startswith("%%")
def entries_from_file(file):
    """Parse a .bib file and return a list of BibEntry objects.

    The file is split into chunks on newline + '@'; chunks matched by
    must_omit() are skipped. Only the fields listed in `keywords` below are
    extracted from each chunk.

    NOTE(review): the locals `string` and `type` shadow builtins — rename
    when this function is next touched.
    """
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter', 'crossref',
                'doi', 'edition', 'editor', 'eprint', 'eprintclass', 'eprinttype',
                'howpublished', 'institution', 'journal', 'month', 'note', 'number',
                'organization', 'pages', 'publisher', 'school', 'series', 'title',
                'type', 'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        # Header looks like "article{somekey," -> capture entry type and key.
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]  # strip the trailing comma
            # The first chunk of the file still carries its leading '@'
            # (only the later split points consumed theirs).
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            # e.g. "  author = {Some Name},\n" -> capture the value part.
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                # Trim trailing comma / closing braces the regex let through.
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries
BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())
for _ in BibEntries:
print _.entry() | mit | Python |
|
2428467d8c0d9c70a4931e1bd1b5971c9f45a0b7 | add function | BhaskarNaidu/python | function.py | function.py | def foo(x,y):
print(x+y)
foo(3,4)
| apache-2.0 | Python |
|
9873891a9f26edc51a22e51b5910615a7e08d410 | Create WaterLevel.py | Python-IoT/Smart-IoT-Planting-System,Python-IoT/Smart-IoT-Planting-System | device/src/WaterLevel.py | device/src/WaterLevel.py | #Water level sensor.
#VCC
#GND
#AO <--> ADC Port(A7) Analog data
#AO is the specific value.
import pyb
adc = pyb.ADC(Pin('A7')) # create an analog object from a pin
adc = pyb.ADC(pyb.Pin.board.A7)
# read an analog value
def getWaterLevel():
print('WaterLevel Ao')
return adc.read()
| mit | Python |
|
d43d4f29752bfae8a4d2e337f5523cd5fc7888d8 | add Trimplementation of Kadane's algorithm | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | dp/kadane-_algorithm/py/TrieToSucceed_kadane.py | dp/kadane-_algorithm/py/TrieToSucceed_kadane.py | #!/usr/bin/python3
"""
This module contains an implementation of Kadane's algorithm to determine the
maximum sum of a subarray.
"""
def kadane(list_obj=None):
    """
    Find maximum sum of a contiguous subarray (Kadane's algorithm)
    :param list list_obj: list of numbers
    :return: maximum sum of subarray
    :rtype: int

    DOCTESTS
    --------
    Test 1 (list of ints):
    >>> print(kadane([-1, 2, 3, -4, 5, -6]))
    6
    Test 2 (list of ints):
    >>> print(kadane([-1, 2, 3, -6, 5, -6]))
    5
    Test 3 (list of ints):
    >>> print(kadane([3, 2, 3, -7, 5, -6]))
    8
    Test 4 (invalid argument type):
    >>> print(kadane())
    Traceback (most recent call last):
    ...
    TypeError: input must be of type list
    Test 5 (empty list):
    >>> print(kadane([]))
    Traceback (most recent call last):
    ...
    ValueError: list must not be empty
    """
    if type(list_obj) is not list:
        raise TypeError("input must be of type list")
    if not list_obj:
        raise ValueError("list must not be empty")
    # Seed both accumulators with the first element, then scan the REST of
    # the list. (The previous version re-scanned index 0 after seeding,
    # double-counting the first element: [3, 2, 3, -7, 5, -6] reported 11
    # instead of the correct 8.)
    max_sum = cur_max = list_obj[0]
    for val in list_obj[1:]:
        # Best sum of a subarray ending here: extend the run or restart.
        cur_max = max(val, val + cur_max)
        max_sum = max(max_sum, cur_max)
    return max_sum
if __name__ == '__main__':
import doctest
doctest.testmod()
| cc0-1.0 | Python |
|
08e43e8bfd150252b3e05ff62ee25cdf0e519f20 | Revert #830 because it broke the case when the main script is not in path. | jpakkane/meson,becm/meson,pexip/meson,centricular/meson,ernestask/meson,aaronp24/meson,aaronp24/meson,QuLogic/meson,trhd/meson,jpakkane/meson,QuLogic/meson,jpakkane/meson,QuLogic/meson,jeandet/meson,mesonbuild/meson,rhd/meson,trhd/meson,ernestask/meson,becm/meson,fmuellner/meson,becm/meson,fmuellner/meson,ernestask/meson,becm/meson,wberrier/meson,aaronp24/meson,fmuellner/meson,becm/meson,thiblahute/meson,centricular/meson,trhd/meson,pexip/meson,QuLogic/meson,fmuellner/meson,ernestask/meson,wberrier/meson,ernestask/meson,trhd/meson,wberrier/meson,centricular/meson,centricular/meson,jpakkane/meson,pexip/meson,aaronp24/meson,mesonbuild/meson,thiblahute/meson,jeandet/meson,wberrier/meson,becm/meson,rhd/meson,rhd/meson,MathieuDuponchelle/meson,trhd/meson,centricular/meson,fmuellner/meson,thiblahute/meson,aaronp24/meson,rhd/meson,MathieuDuponchelle/meson,pexip/meson,centricular/meson,thiblahute/meson,fmuellner/meson,trhd/meson,trhd/meson,mesonbuild/meson,thiblahute/meson,centricular/meson,jpakkane/meson,wberrier/meson,jpakkane/meson,MathieuDuponchelle/meson,mesonbuild/meson,mesonbuild/meson,rhd/meson,centricular/meson,jeandet/meson,MathieuDuponchelle/meson,jeandet/meson,QuLogic/meson,rhd/meson,MathieuDuponchelle/meson,thiblahute/meson,QuLogic/meson,trhd/meson,fmuellner/meson,ernestask/meson,fmuellner/meson,ernestask/meson,MathieuDuponchelle/meson,ernestask/meson,thiblahute/meson,pexip/meson,jeandet/meson,ernestask/meson,jeandet/meson,pexip/meson,wberrier/meson,becm/meson,mesonbuild/meson,fmuellner/meson,rhd/meson,thiblahute/meson,jpakkane/meson,trhd/meson,jpakkane/meson,mesonbuild/meson,thiblahute/meson,aaronp24/meson,jpakkane/meson,MathieuDuponchelle/meson,pexip/meson,QuLogic/meson,rhd/meson,aaronp24/meson,QuLogic/meson,wberrier/meson,mesonbuild/meson,QuLogic/meson,mesonbuild/meson,pexip/meson,MathieuDuponchelle/meson,becm/meson,MathieuDuponchelle/meson
,mesonbuild/meson,becm/meson,becm/meson,aaronp24/meson,pexip/meson,jeandet/meson,jeandet/meson,wberrier/meson,aaronp24/meson,pexip/meson,jeandet/meson | meson.py | meson.py | #!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild import mesonmain
import sys, os
def main():
    """Resolve this script's absolute path and hand control to mesonmain.

    Exits the process with mesonmain.run()'s return code.
    """
    thisfile = __file__
    # __file__ is relative when launched as "python meson.py"; anchor it to
    # the current working directory before anything chdirs away.
    if not os.path.isabs(thisfile):
        thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile))
    # When run as a module of an (empty-named) package, point at the
    # containing directory instead of the file itself.
    if __package__ == '':
        thisfile = os.path.dirname(thisfile)
    # The first argument *must* be an absolute path because
    # the user may have launched the program from a dir
    # that is not in path.
    sys.exit(mesonmain.run(thisfile, sys.argv[1:]))
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild import mesonmain
import sys, os
sys.exit(mesonmain.run(sys.argv[0], sys.argv[1:]))
| apache-2.0 | Python |
27b2e87a8653961fbba45962e9e6ec1d20904a03 | Create demo_lcd.py | bradgillap/I2C-LCD-Display | 20x4LCD/demo_lcd.py | 20x4LCD/demo_lcd.py | import lcddriver
from time import *
lcd = lcddriver.lcd()
lcd.lcd_display_string("Hello world", 1)
lcd.lcd_display_string("My name is", 2)
lcd.lcd_display_string("picorder", 3)
lcd.lcd_display_string("I am a Raspberry Pi", 4)
| apache-2.0 | Python |
|
2c345f2927cba033908020b97c33064bbfce5fbd | Add 38-count-and-say.py | mvj3/leetcode | 38-count-and-say.py | 38-count-and-say.py | """
Count and Say
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
----------------- A better description from careercup.com ---------------
"Count and Say problem" Write a code to do following:
n String to print
0 1
1 1 1
2 2 1
3 1 2 1 1
...
Base case: n = 0 print "1"
for n = 1, look at previous string and write number of times a digit is seen and the digit itself. In this case, digit 1 is seen 1 time in a row... so print "1 1"
for n = 2, digit 1 is seen two times in a row, so print "2 1"
for n = 3, digit 2 is seen 1 time and then digit 1 is seen 1 so print "1 2 1 1"
for n = 4 you will print "1 1 1 2 2 1"
Consider the numbers as integers for simplicity. e.g. if previous string is "10 1" then the next will be "1 10 1 1" and the next one will be "1 1 1 10 2 1"
Performance:
1. Total Accepted: 56840 Total Submissions: 219756 Difficulty: Easy
"""
class Solution(object):
    """LeetCode 38: generate the nth term of the count-and-say sequence."""
    # Thanks https://github.com/jw2013/Leetcode-Py/blob/master/Count%20and%20Say.py
    def countAndSay(self, n):
        """
        :type n: int
        :rtype: str
        """
        sequence = "1"
        # Each pass reads the previous term as runs of identical digits and
        # rewrites it as (run length, digit) pairs; n == 1 is the seed "1".
        # Use range() instead of the Python-2-only xrange() so this runs on
        # both Python 2 and 3.
        for _ in range(n - 1):
            idx, next_sequence = 0, ""
            end_idx = len(sequence) - 1
            while idx < len(sequence):
                count = 1
                # Consume the run of identical digits starting at idx.
                while idx < end_idx and sequence[idx] == sequence[idx + 1]:
                    idx += 1
                    count += 1
                next_sequence += "{}{}".format(count, sequence[idx])
                idx += 1
            sequence = next_sequence
        return sequence
def test_func(result, expect):
assert result == expect, [result, expect]
test_func(Solution().countAndSay(1), "1")
test_func(Solution().countAndSay(2), "11")
test_func(Solution().countAndSay(3), "21")
test_func(Solution().countAndSay(4), "1211")
test_func(Solution().countAndSay(5), "111221")
test_func(Solution().countAndSay(6), "312211")
"""
if n == 0:
return sequence
while n > 0:
next_sequence = ""
curr_char = sequence[0]
curr_char_matching_count = 1
dummy_end = len(sequence) # to finish the last count+num
is_same = False
for idx in xrange(1, dummy_end + 1):
if idx < dummy_end:
is_same = sequence[idx] == curr_char
if is_same:
curr_char_matching_count += 1
if (idx == dummy_end) or not is_same:
next_sequence += curr_char + str(curr_char_matching_count)
# prepare next round
if (idx < dummy_end) and (not is_same):
curr_char = sequence[idx]
sequence = next_sequence
n -= 1"""
"""
NOTE: If dont use a cursor, but use some variables to hold position informations, it's hard to debug!!! And it costs me several hours...
class Solution(object):
def countAndSay(self, num):
sequence = "1" # the default start
for time in range(num):
next_sequence = ""
curr_char_matching_count = 1
for idx, curr_char in enumerate(sequence):
if idx < len(curr_char) - 1:
if curr_char == sequence[idx + 1]:
curr_char_matching_count += 1
else:
next_sequence += (str(curr_char_matching_count) + curr_char)
curr_char_matching_count = 0
if idx == len(curr_char) - 1:
next_sequence += (str(curr_char_matching_count) + curr_char)
sequence = next_sequence
print "sequence:", sequence
print "-"*100
print
return sequence
"""
| mit | Python |
|
e4a4e8d43c1b4c63ac32467a8e49a5b81f8f2fa3 | Create roundrobin.py | LassiAutio/scheduler | roundrobin.py | roundrobin.py | import string
from game import Game
class RoundRobin(object):
    """Round-robin fixture generator using the circle method: one team stays
    fixed while the others rotate between rounds.

    NOTE: Python 2 code (print statements); depends on the project Game class.
    """
    def __init__(self, teams_count):
        self.teams = generateTeams(teams_count)
        # Completed rotations so far; also drives home/away alternation.
        self.current_round = 0
    def getRound(self):
        """Pair adjacent teams into Games; home/away flips each full cycle."""
        games = []
        teams_count = len(self.teams)
        # Grows by one every (teams_count-1) rounds, i.e. after a full cycle,
        # so repeated fixtures swap home and away.
        home_away_index = self.current_round // (teams_count-1)
        for i in range(0, teams_count, 2):
            if home_away_index%2 == 0:
                game = Game( self.teams[i], self.teams[i+1] )
            else:
                game = Game( self.teams[i+1], self.teams[i] )
            games.append( game )
        return games
    def getNextRound(self):
        """Advance the rotation and return the new round's games."""
        self.rotate()
        return self.getRound()
    def rotate(self):
        """Circle-method rotation: keep teams[0] fixed, move the last team to
        position 1, shifting everyone else one slot toward the end."""
        head = self.teams[0]
        tail = self.teams[1: len(self.teams)-1]
        second = self.teams[len(self.teams)-1]
        self.teams = []
        self.teams.append(head)
        self.teams.append(second)
        self.teams = self.teams + tail
        self.current_round += 1
    def getSchedule(self, rounds_count):
        """Return rounds_count rounds, each a list of Game objects."""
        schedule = []
        for i in range(rounds_count):
            games = self.getRound()
            schedule.append(games)
            self.rotate()
        return schedule
    def printSchedule(self, rounds_count):
        """Print the schedule, one "== Day #N" section per round."""
        schedule = self.getSchedule(rounds_count)
        for day in range(len(schedule)):
            print "== Day #" + str(day+1)
            games = schedule[day]
            for game in games:
                print game
                # NOTE(review): rotating once per printed game, on top of the
                # rotation getSchedule already did — confirm this is intended.
                self.rotate()
def generateTeams(teams_count):
    """Return the first teams_count letters A, B, C, ... as single-character
    team names; an odd count gets a trailing blank entry so every team can
    be paired (a "bye" slot)."""
    roster = [letter for letter in string.ascii_uppercase[:teams_count]]
    if teams_count % 2 != 0:
        roster.append(" ")
    return roster
| mit | Python |
|
d1eac9803adbf9b91b22ce62a4bdf5db790b6265 | Create ShodanToCSV.py | JeroenSlobbe/Scripts,JeroenSlobbe/Scripts,JeroenSlobbe/Scripts | ShodanToCSV.py | ShodanToCSV.py | #!/usr/bin/env python
#
# Search shodan, output to CSV
# To ensure comma as seperator, all comma's in os and header field (if any) are replaced for ;;;
# To ensure row integrity all newlines (\n) are replaced by #NWLN
# Author: Jeroen
import shodan
import sys
import os
from optparse import OptionParser
#Initialize userinput
oparser = OptionParser("usage: %prog [options] [command]*", version="v%d.%d.%d" % (1, 0, 0))
oparser.add_option("-d", "--debug", dest="debug", action = "store_true", help="Be extremely verbose", default=False)
oparser.add_option("-k", "--key", dest="AKEY", help="Use your personal API key",default="GETYOUROWNKEY")
oparser.add_option("-s", "--search", dest="searchQuery", help="Insert shodan search query",default=False)
oparser.add_option("-o", "--output", dest="outputFileName", help="output filename",default="output.csv")
(options,args) = oparser.parse_args(sys.argv)
if (options.searchQuery == False):
print 'Type shodanToCSV.py --help for syntax'
sys.exit(1)
try:
# Setup the api
api = shodan.WebAPI(options.AKEY)
# Perform the search
result = api.search(options.searchQuery)
csvHeader = "ip,port,os,country,lastupdate,header\n"
fo = open(options.outputFileName, 'w')
fo.write(str(csvHeader))
# Loop through the matches and print each IP
for result in result['matches']:
row = result['ip'] + ',' + str(result['port']) + ',' + str(result['os']).replace(",",";;;") + ',' + result['country_name'] + ',' + result['updated'] + ',' + str(result['data']).replace(",",";;;")
row = row.replace("\r\n","").replace("\n","") + str(os.linesep)
if(options.debug != False):
print str(row)
fo.write(str(row))
fo.close()
except Exception, e:
print 'Error: %s' % e
exit(1)
| mit | Python |
|
9be177007ce95f2b9e47225a46effe7b7682ba38 | Create StockReader.py | econogee/FinanceScripts | StockReader.py | StockReader.py | #econogee, 1/28/2016
#Stock Data Retrieval Script
import os
import numpy as np
import urllib2
startday = str(0)
startmonth = str(1)
startyear = str(2005)
endday = str(30)
endmonth = str(1)
endyear = str(2016)
symbols = []
with open('stocklist.csv') as f:
content = f.readlines()
for l in content:
symbols.append(l.split(",")[0])
for s in symbols:
response = urllib2.urlopen('http://real-chart.finance.yahoo.com/table.csv?s='+str(s)+\
'&a=' + startday + '&b=' + startmonth + '&c=' + startyear + \
'&d=' + endday + '&e=' + endmonth + '&f=' + endyear + \
'&g=d&ignore=.csv')
html = response.read()
html = html.split('\n')
html = np.array(html)
np.savetxt(str(s),html,fmt='%s',delimiter=',')
| mit | Python |
|
706da9008e8101c03bb2c7754b709209897cd952 | Add Organization Administrator model. | MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging | app/soc/models/org_admin.py | app/soc/models/org_admin.py | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Organization Administrator Model."""
__authors__ = [
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
import soc.models.role
class OrgAdmin(soc.models.role.Role):
    """Administrator details for a specific Organization.
    """
    pass
| apache-2.0 | Python |
|
2e3349b75fffb9a9f3906d065bc8f141eef02d38 | Add run_wsgi | aitoralmeida/intellidata,aitoralmeida/intellidata | run_wsgi.wsgi | run_wsgi.wsgi | #!/usr/bin/env python
import os
import sys
sys.stdout = sys.stderr
INTELLIDATA_DIR = os.path.dirname(__file__)
sys.path.insert(0, INTELLIDATA_DIR)
os.chdir(INTELLIDATA_DIR)
import config
from intellidata import app as application
application.config.from_object('config')
| apache-2.0 | Python |
|
a086e7328ca920f269812a87be095ce638467f95 | Add youtube-dl library sample of operation | daineseh/python_code | crawler/youtube_dl_op_sample.py | crawler/youtube_dl_op_sample.py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys
import youtube_dl
def main():
    """Resolve and print the direct media URL for the video URL in argv[1]."""
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return
    # Simulate only: extract metadata without downloading anything.
    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }
    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                # Fall back to the 'formats' list: take the URL of the entry
                # whose format_id matches the top-level selection.
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        # Extraction failures are reported, then treated as "no URL found".
        print(e)
        resource_uri = None
    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")
if __name__ == '__main__':
main()
| mit | Python |
|
70927650139a94b1c7be5557e47340ccda609d36 | Create UnicommWlan.py | xiaoyao9933/ChinaUnicomWlan-AutoLogin | UnicommWlan.py | UnicommWlan.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Auto login the unicomm wlan
# By Lu CHAO(me@chao.lu) ,2013 10 12.
from urllib2 import build_opener,HTTPCookieProcessor
from urllib import urlencode
from cookielib import CookieJar
import time,sys
from random import random
global loop
global count
loop=True
count=0
def LoginWlan(user,password,address):
    """POST the login form to the China Unicom WLAN portal (Python 2).

    Side effects on module globals: `count` is incremented when the portal
    reports the account as already online; `loop` is cleared on a hard
    failure so the retry loop below stops.
    """
    index_page = "http://202.106.46.37/"
    global loop,count
    try:
        # Get a CookieJar instance
        cj = CookieJar()
        # Build an opener that stores its cookies in the jar
        opener=build_opener(HTTPCookieProcessor(cj))
        # Masquerade as a normal browser so the server does not reject us
        opener.addheaders = [('User-agent','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
        # Build the POST payload carrying the login credentials
        data = urlencode({"username":user,"password":password,"passwordType":6,"wlanuserip":"","userOpenAddress":address,"checkbox":0,"basname":"","setUserOnline":"","sap":"","macAddr":"","bandMacAuth":0,"isMacAuth":"","basPushUrl":"http%253A%252F%252F202.106.46.37%252F","passwordkey":""})
        # Open the index page first; the jar records its session cookies
        opener.open(index_page)
        # Then POST the login form with those cookies attached
        op=opener.open("http://202.106.46.37/login.do",data)
        # Read the response body
        data= op.read()
        if 'success' in data:
            print "%s : Logsin Success"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
        elif u"此账号已在线!" in data.decode('utf-8'):
            # Portal says the account is already online ("此账号已在线!")
            count=count+1
        else:
            print "%s :Failed "%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
            print data.decode('utf-8')
            loop=False
        opener.close()
        return
    except Exception,e:
        print str(e)
# Send all output to the log file and keep re-logging in until a hard
# failure clears `loop`.
file=open("/var/log/autologin.log",'w')
sys.stdout=file
sys.stderr=file
while loop:
    # Put your own username, password and home region here
    LoginWlan("你的用户名","你的密码","bj")
    file.flush()
    # Log a counter line roughly every tenth "already online" response.
    if count%10==1:
        print "%s :Count %d"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),count)
    elif count>10000:
        count=0
    else:
        None
    # Sleep 20-25 seconds (random jitter) between attempts.
    time.sleep(20+int(random()*5))
| apache-2.0 | Python |
|
a083baddd853514a5697e3a98eea4251c2ce5487 | Create __openerp__.py | elmonitor/workshop-td | __openerp__.py | __openerp__.py | {
"name": "Product price based on margin with formula sale_price=cost_price/margin",
"version": "8.0.0.1",
"author": "3nodus",
'category': 'Product',
"website": "http://www.3nodus.com/",
"license": "AGPL-3",
"depends": [
"product",
],
"demo": [
],
"data": [
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
}
| agpl-3.0 | Python |
|
ca83457b4a003527cad9c9d57402c53e4571299c | add python opt and logging boilerplate code | yunfeiguo/bioinfo_toolbox,yunfeiguo/bioinfo_toolbox,yunfeiguo/bioinfo_toolbox | sandbox/python/boilerplate_code/python_opt_log.py | sandbox/python/boilerplate_code/python_opt_log.py | #!/usr/bin/env python
import argparse
import logging
import os
import sys
import re
logger = None
def my_function(blah, *more):
    """Placeholder for the program's real work; currently a no-op.

    Accepts extra positional arguments because the __main__ block invokes it
    as my_function(args.input1, args.input2); the previous one-parameter
    signature made that call raise TypeError. Single-argument callers are
    unaffected.
    """
    return
if __name__ == "__main__":
    # %-style log layout shared by both verbosity levels below.
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    parser = argparse.ArgumentParser(description="program name", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # NOTE(review): type=file relies on the Python-2-only `file` builtin;
    # argparse.FileType would be the Python 3 equivalent.
    parser.add_argument("input1", type = file)
    parser.add_argument("input2", type = file)
    parser.add_argument("--selection", type = str, default = 'a', choices = ['a', 'b', 'c'], help = 'choose from a,b,c')
    parser.add_argument("--cutoff", type = int, default = 1, help = 'cutoff score')
    parser.add_argument("--variable_args", type = float, action = 'append', nargs = 3,
            default = [1.0,2.0,1.2], help = '3 scores')
    parser.add_argument("--verbose","-v", action = 'count', help='increase verbosity')
    args = parser.parse_args()
    # Any -v switches on debug-level logging; default is info level.
    if args.verbose >= 1:
        logging.basicConfig(level=logging.DEBUG, format = FORMAT)
    else:
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    logger = logging.getLogger(__name__)
    logger.info("working hard ...")
    my_function(args.input1, args.input2)
    logger.info("Done.")
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.