repo_name (stringlengths 6–90) | path (stringlengths 4–230) | copies (stringlengths 1–4) | size (stringlengths 4–7) | content (stringlengths 734–985k) | license (stringclasses, 15 values) | hash (int64, -9,223,303,126,770,100,000 to 9,223,233,360B) | line_mean (float64 3.79–99.6) | line_max (int64 19–999) | alpha_frac (float64 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64 1.5–8.06) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
brohrer/becca_ros | nodes/agent_node.py | 1 | 2432 | #!/usr/bin/env python
import numpy as np
import rospy
from std_msgs.msg import String
from becca_ros.msg import *
from becca.core.agent import Agent
class AgentNode():
def __init__(self):
restore = True
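        # When True, resume from a previously saved agent state via
        # Agent.restore() (see the `if restore:` branch below).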
world_initialized = False
        while not world_initialized:
if rospy.get_param('world_initialized') == 'True':
world_initialized = True
num_sensors = rospy.get_param('num_sensors')
num_actions = rospy.get_param('num_actions')
agent_name = '_'.join((rospy.get_param('world_name'), 'agent'))
self.agent = Agent(num_sensors, num_actions, agent_name=agent_name)
if restore:
self.agent = self.agent.restore()
# Set up the subscriber. Anytime a message is received on the
# 'sensors_reward' channel, the callback function is called.
rospy.Subscriber('sensors_reward', Sensors_Reward, self.callback)
# Set up the publisher.
        # queue_size=1 means that only the most recently published
        # set of sensor data is available for the agent node. If the
        # agent node steps too slowly, it will drop sensor data.
self.actions_pub = rospy.Publisher('actions', Actions, queue_size=1)
rospy.init_node('agent_node')
        self.rate = rospy.Rate(1000)  # 1000 Hz
self.has_new_sensors = False
def callback(self, sensors_reward_msg):
""" Whenever the subscriber receives actions, update the node """
self.has_new_sensors = True
self.sensors = np.array(sensors_reward_msg.sensors)
self.reward = sensors_reward_msg.reward
def run(self):
""" Step through the agent on a loop """
while not rospy.is_shutdown():
# Only step the agent if a new set of sensors has been received
if self.has_new_sensors:
self.has_new_sensors = False
# Advance the agent by one time step
actions = self.agent.step(self.sensors, self.reward)
# Populate the message to be passed back to the world node
actions_data = Actions()
actions_data.actions = list(actions.ravel())
self.actions_pub.publish(actions_data)
self.rate.sleep()
if __name__ == '__main__':
try:
agent_node = AgentNode()
agent_node.run()
    except rospy.ROSInterruptException:
        pass
| mit | -8,626,491,577,455,195,000 | 39.533333 | 76 | 0.610609 | false | 4.129032 | false | false | false |
sissaschool/xmlschema | xmlschema/validators/notations.py | 1 | 1526 | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
from ..names import XSD_NOTATION
from ..helpers import get_qname
from .xsdbase import XsdComponent
class XsdNotation(XsdComponent):
"""
Class for XSD *notation* declarations.
.. <notation
id = ID
name = NCName
public = token
system = anyURI
{any attributes with non-schema namespace}...>
Content: (annotation?)
</notation>
"""
_ADMITTED_TAGS = {XSD_NOTATION}
@property
def built(self):
return True
def _parse(self):
super(XsdNotation, self)._parse()
if self.parent is not None:
self.parse_error("a notation declaration must be global")
try:
self.name = get_qname(self.target_namespace, self.elem.attrib['name'])
except KeyError:
self.parse_error("a notation must have a 'name' attribute")
if 'public' not in self.elem.attrib and 'system' not in self.elem.attrib:
self.parse_error("a notation must have a 'public' or a 'system' attribute")
@property
def public(self):
return self.elem.get('public')
@property
def system(self):
return self.elem.get('system')
| mit | -7,948,054,479,086,276,000 | 28.346154 | 87 | 0.631062 | false | 3.815 | false | false | false |
isnowfy/snownlp | snownlp/sim/bm25.py | 7 | 1443 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
class BM25(object):
def __init__(self, docs):
self.D = len(docs)
self.avgdl = sum([len(doc)+0.0 for doc in docs]) / self.D
self.docs = docs
self.f = []
self.df = {}
self.idf = {}
self.k1 = 1.5
self.b = 0.75
self.init()
def init(self):
for doc in self.docs:
tmp = {}
for word in doc:
                if word not in tmp:
tmp[word] = 0
tmp[word] += 1
self.f.append(tmp)
for k, v in tmp.items():
if k not in self.df:
self.df[k] = 0
self.df[k] += 1
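        # Okapi BM25 IDF: idf(w) = log((D - df(w) + 0.5) / (df(w) + 0.5)),
        # where D is the corpus size and df(w) the document frequency of w.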
for k, v in self.df.items():
self.idf[k] = math.log(self.D-v+0.5)-math.log(v+0.5)
def sim(self, doc, index):
score = 0
for word in doc:
if word not in self.f[index]:
continue
d = len(self.docs[index])
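            # Per-term BM25 contribution:
            #   idf(w) * f(w,d) * (k1+1) / (f(w,d) + k1 * (1 - b + b * |d| / avgdl))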
score += (self.idf[word]*self.f[index][word]*(self.k1+1)
/ (self.f[index][word]+self.k1*(1-self.b+self.b*d
/ self.avgdl)))
return score
def simall(self, doc):
scores = []
for index in range(self.D):
score = self.sim(doc, index)
scores.append(score)
return scores
| mit | -8,927,470,298,898,820,000 | 27.294118 | 71 | 0.435204 | false | 3.562963 | false | false | false |
CTSRD-CHERI/u-boot | tools/binman/etype/vblock.py | 5 | 3159 | # SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Support for a Chromium OS verified boot block, used to sign a read-write
# section of the image.
from collections import OrderedDict
import os
from binman.entry import Entry, EntryArg
from dtoc import fdt_util
from patman import tools
class Entry_vblock(Entry):
"""An entry which contains a Chromium OS verified boot block
Properties / Entry arguments:
- content: List of phandles to entries to sign
- keydir: Directory containing the public keys to use
- keyblock: Name of the key file to use (inside keydir)
        - signprivate: Name of the private key file to use (inside keydir)
- version: Version number of the vblock (typically 1)
- kernelkey: Name of the kernel key to use (inside keydir)
- preamble-flags: Value of the vboot preamble flags (typically 0)
Output files:
- input.<unique_name> - input file passed to futility
- vblock.<unique_name> - output file generated by futility (which is
used as the entry contents)
Chromium OS signs the read-write firmware and kernel, writing the signature
in this block. This allows U-Boot to verify that the next firmware stage
and kernel are genuine.
"""
def __init__(self, section, etype, node):
super().__init__(section, etype, node)
self.content = fdt_util.GetPhandleList(self._node, 'content')
if not self.content:
self.Raise("Vblock must have a 'content' property")
(self.keydir, self.keyblock, self.signprivate, self.version,
self.kernelkey, self.preamble_flags) = self.GetEntryArgsOrProps([
EntryArg('keydir', str),
EntryArg('keyblock', str),
EntryArg('signprivate', str),
EntryArg('version', int),
EntryArg('kernelkey', str),
EntryArg('preamble-flags', int)])
def ObtainContents(self):
# Join up the data files to be signed
input_data = b''
for entry_phandle in self.content:
data = self.section.GetContentsByPhandle(entry_phandle, self)
if data is None:
# Data not available yet
return False
input_data += data
uniq = self.GetUniqueName()
output_fname = tools.GetOutputFilename('vblock.%s' % uniq)
input_fname = tools.GetOutputFilename('input.%s' % uniq)
tools.WriteFile(input_fname, input_data)
prefix = self.keydir + '/'
args = [
'vbutil_firmware',
'--vblock', output_fname,
'--keyblock', prefix + self.keyblock,
'--signprivate', prefix + self.signprivate,
'--version', '%d' % self.version,
'--fv', input_fname,
'--kernelkey', prefix + self.kernelkey,
'--flags', '%d' % self.preamble_flags,
]
#out.Notice("Sign '%s' into %s" % (', '.join(self.value), self.label))
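        # Equivalent shell invocation (values substituted from the entry args):
        #   futility vbutil_firmware --vblock <out> --keyblock <keydir>/<keyblock> \
        #     --signprivate <keydir>/<signprivate> --version <n> --fv <in> \
        #     --kernelkey <keydir>/<kernelkey> --flags <n>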
stdout = tools.Run('futility', *args)
self.SetContents(tools.ReadFile(output_fname))
return True
| gpl-2.0 | 4,337,053,200,046,043,600 | 38.4875 | 79 | 0.617284 | false | 3.993679 | false | false | false |
oksome/Tumulus | setup.py | 1 | 2102 | # -*- coding: utf-8 -*-
# This file is part of Tumulus.
#
# Copyright (C) 2013 OKso (http://okso.me)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
with open('README.rst') as file:
long_description = file.read()
setup(name='Tumulus',
version='0.2.1',
description='Python HTML Generator for Recyclable Web Elements',
long_description=long_description,
author='OKso.me',
author_email='@okso.me',
url='https://github.com/oksome/Tumulus/',
packages=['tumulus', 'tumulus.lib', 'tumulus.plugins'],
install_requires=['beautifulsoup4', 'html5lib'],
license='AGPLv3',
keywords="html generator template templating engine",
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Programming Language :: Python :: 3',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML '
],
)
| agpl-3.0 | 3,843,013,790,277,440,500 | 41.897959 | 85 | 0.623692 | false | 4.379167 | false | false | false |
SciLifeLab/genomics-status | status/user_preferences.py | 1 | 1433 | import tornado.web
import json
import time
import copy
import base64
import requests
from status.util import SafeHandler
from status.projects import PresetsHandler as ph
class UserPrefPageHandler(SafeHandler):
""" Serves a modal with user preferences and saves them
URL: /userpref
"""
def get(self):
t = self.application.loader.load("user_preferences.html")
notf_pref = ph.get_user_details(self.application, self.get_current_user().email).get('notification_preferences', 'Both')
self.write(t.generate(pref=notf_pref))
def post(self):
option = json.loads(self.request.body)
doc = ph.get_user_details(self.application, self.get_current_user().email)
doc['notification_preferences'] = option['notification_preferences']
try:
self.application.gs_users_db.save(doc)
        except Exception as e:
            self.set_status(400)
            self.write(str(e))
            return
        self.set_status(201)
        self.write({'success': 'success!!'})
class UserPrefPageHandler_b5(UserPrefPageHandler):
""" Serves a modal with user preferences and saves them
URL: /userpref_b5
"""
def get(self):
t = self.application.loader.load("user_preferences_b5.html")
notf_pref = ph.get_user_details(self.application, self.get_current_user().email).get('notification_preferences', 'Both')
self.write(t.generate(pref=notf_pref))
| mit | 200,218,935,690,804,740 | 33.119048 | 128 | 0.675506 | false | 3.712435 | false | false | false |
Ircam-Web/mezzanine-organization | organization/job/migrations/0004_candidacy.py | 1 | 3355 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-30 10:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('sites', '0002_alter_domain_unique'),
('organization-job', '0003_auto_20160929_1833'),
]
operations = [
migrations.CreateModel(
name='Candidacy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keywords_string', models.CharField(blank=True, editable=False, max_length=500)),
('title', models.CharField(max_length=500, verbose_name='Title')),
('slug', models.CharField(blank=True, help_text='Leave blank to have the URL auto-generated from the title.', max_length=2000, null=True, verbose_name='URL')),
('_meta_title', models.CharField(blank=True, help_text='Optional title to be used in the HTML title tag. If left blank, the main title field will be used.', max_length=500, null=True, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('gen_description', models.BooleanField(default=True, help_text='If checked, the description will be automatically generated from content. Uncheck if you want to manually set a custom description.', verbose_name='Generate description')),
('created', models.DateTimeField(editable=False, null=True)),
('updated', models.DateTimeField(editable=False, null=True)),
('status', models.IntegerField(choices=[(1, 'Draft'), (2, 'Published')], default=2, help_text='With Draft chosen, will only be shown for admin users on the site.', verbose_name='Status')),
('publish_date', models.DateTimeField(blank=True, db_index=True, help_text="With Published chosen, won't be shown until this time", null=True, verbose_name='Published from')),
('expiry_date', models.DateTimeField(blank=True, help_text="With Published chosen, won't be shown after this time", null=True, verbose_name='Expires on')),
('short_url', models.URLField(blank=True, null=True)),
('in_sitemap', models.BooleanField(default=True, verbose_name='Show in sitemap')),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('text_button', models.CharField(blank=True, max_length=150, verbose_name='text button')),
('external_content', models.URLField(blank=True, max_length=1000, verbose_name='external content')),
('object_id', models.PositiveIntegerField(editable=False, null=True, verbose_name='related object')),
('content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='local content')),
('site', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
],
options={
'verbose_name': 'candidacy',
},
),
]
| agpl-3.0 | 1,642,542,477,276,891,100 | 70.382979 | 253 | 0.645007 | false | 4.157373 | false | false | false |
jbloomlab/dms_tools2 | tests/test_dssp.py | 1 | 1099 | """Tests `dms_tools2.dssp` module."""
import os
import unittest
import numpy
import pandas
import dms_tools2.dssp
class test_processDSSP(unittest.TestCase):
"""Tests `dms_tools2.dssp.processDSSP`."""
def test_processDSSP(self):
"""Tests `dms_tools2.dssp.processDSSP`."""
testdir = os.path.join(os.path.dirname(__file__),
'dssp_input_files')
dsspfile = os.path.join(testdir, '1RVX_trimer_sequentialnumbering.dssp')
df = dms_tools2.dssp.processDSSP(dsspfile, 'A').sort_values('site')
expected = (pandas.read_csv(os.path.join(testdir, 'expected_output.csv'))
.sort_values('site'))
for c in ['site', 'ASA', 'RSA']:
self.assertTrue(numpy.allclose(expected[c], df[c]),
'mismatch for {0}'.format(c))
self.assertTrue(all(expected['SS'].values == df['SS'].values))
self.assertTrue(all(expected['SS_class'].values ==
df['SS_class'].values))
if __name__ == '__main__':
runner = unittest.TextTestRunner()
unittest.main(testRunner=runner)
| gpl-3.0 | -5,734,523,575,777,582,000 | 33.34375 | 81 | 0.604186 | false | 3.261128 | true | false | false |
dahaic/outerspace | server/lib/medusa/filesys.py | 2 | 13242 | # -*- Mode: Python; tab-width: 4 -*-
# $Id$
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#
# We want to provide a complete wrapper around any and all
# filesystem operations.
# this class is really just for documentation,
# identifying the API for a filesystem object.
# opening files for reading, and listing directories, should
# return a producer.
class abstract_filesystem:
def __init__ (self):
pass
def current_directory (self):
"Return a string representing the current directory."
pass
def listdir (self, path, long=0):
"""Return a listing of the directory at 'path' The empty string
indicates the current directory. If 'long' is set, instead
return a list of (name, stat_info) tuples
"""
pass
def open (self, path, mode):
"Return an open file object"
pass
def stat (self, path):
"Return the equivalent of os.stat() on the given path."
pass
def isdir (self, path):
"Does the path represent a directory?"
pass
def isfile (self, path):
"Does the path represent a plain file?"
pass
def cwd (self, path):
"Change the working directory."
pass
def cdup (self):
"Change to the parent of the current directory."
pass
def longify (self, path):
"""Return a 'long' representation of the filename
[for the output of the LIST command]"""
pass
# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.
# security considerations: can symbolic links be used to 'escape' the
# root? should we allow it? if not, then we could scan the
# filesystem on startup, but that would not help if they were added
# later. We will probably need to check for symlinks in the cwd method.
# what to do if wd is an invalid directory?
import os
import stat
import string
def safe_stat (path):
try:
return (path, os.stat (path))
except:
return None
import re
import glob
class os_filesystem:
path_module = os.path
# set this to zero if you want to disable pathname globbing.
# [we currently don't glob, anyway]
do_globbing = 1
def __init__ (self, root, wd='/'):
self.root = root
self.wd = wd
def current_directory (self):
return self.wd
def isfile (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
return self.path_module.isfile (self.translate(p))
def isdir (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
return self.path_module.isdir (self.translate(p))
def cwd (self, path):
p = self.normalize (self.path_module.join (self.wd, path))
translated_path = self.translate(p)
if not self.path_module.isdir (translated_path):
return 0
else:
old_dir = os.getcwd()
# temporarily change to that directory, in order
# to see if we have permission to do so.
try:
can = 0
try:
os.chdir (translated_path)
can = 1
self.wd = p
except:
pass
finally:
if can:
os.chdir (old_dir)
return can
def cdup (self):
return self.cwd ('..')
def listdir (self, path, long=0):
p = self.translate (path)
# I think we should glob, but limit it to the current
# directory only.
ld = os.listdir (p)
if not long:
return list_producer (ld, 0, None)
else:
old_dir = os.getcwd()
try:
os.chdir (p)
# if os.stat fails we ignore that file.
result = filter (None, map (safe_stat, ld))
finally:
os.chdir (old_dir)
return list_producer (result, 1, self.longify)
# TODO: implement a cache w/timeout for stat()
def stat (self, path):
p = self.translate (path)
return os.stat (p)
def open (self, path, mode):
p = self.translate (path)
return open (p, mode)
def unlink (self, path):
p = self.translate (path)
return os.unlink (p)
def mkdir (self, path):
p = self.translate (path)
return os.mkdir (p)
def rmdir (self, path):
p = self.translate (path)
return os.rmdir (p)
# utility methods
def normalize (self, path):
# watch for the ever-sneaky '/+' path element
path = re.sub ('/+', '/', path)
p = self.path_module.normpath (path)
# remove 'dangling' cdup's.
if len(p) > 2 and p[:3] == '/..':
p = '/'
return p
def translate (self, path):
# we need to join together three separate
# path components, and do it safely.
# <real_root>/<current_directory>/<path>
# use the operating system's path separator.
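        # e.g. root='/srv/ftp', wd='/pub', path='docs/x.txt'
        #      -> '/srv/ftp/pub/docs/x.txt' (on posix)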
path = string.join (string.split (path, '/'), os.sep)
p = self.normalize (self.path_module.join (self.wd, path))
p = self.normalize (self.path_module.join (self.root, p[1:]))
return p
def longify (self, (path, stat_info)):
return unix_longify (path, stat_info)
def __repr__ (self):
return '<unix-style fs root:%s wd:%s>' % (
self.root,
self.wd
)
if os.name == 'posix':
class unix_filesystem (os_filesystem):
pass
class schizophrenic_unix_filesystem (os_filesystem):
PROCESS_UID = os.getuid()
PROCESS_EUID = os.geteuid()
PROCESS_GID = os.getgid()
PROCESS_EGID = os.getegid()
def __init__ (self, root, wd='/', persona=(None, None)):
os_filesystem.__init__ (self, root, wd)
self.persona = persona
def become_persona (self):
            if self.persona != (None, None):
uid, gid = self.persona
# the order of these is important!
os.setegid (gid)
os.seteuid (uid)
def become_nobody (self):
            if self.persona != (None, None):
os.seteuid (self.PROCESS_UID)
os.setegid (self.PROCESS_GID)
# cwd, cdup, open, listdir
def cwd (self, path):
try:
self.become_persona()
return os_filesystem.cwd (self, path)
finally:
self.become_nobody()
        def cdup (self):
try:
self.become_persona()
return os_filesystem.cdup (self)
finally:
self.become_nobody()
def open (self, filename, mode):
try:
self.become_persona()
return os_filesystem.open (self, filename, mode)
finally:
self.become_nobody()
def listdir (self, path, long=0):
try:
self.become_persona()
return os_filesystem.listdir (self, path, long)
finally:
self.become_nobody()
# This hasn't been very reliable across different platforms.
# maybe think about a separate 'directory server'.
#
# import posixpath
# import fcntl
# import FCNTL
# import select
# import asyncore
#
# # pipes /bin/ls for directory listings.
# class unix_filesystem (os_filesystem):
# pass
# path_module = posixpath
#
# def listdir (self, path, long=0):
# p = self.translate (path)
# if not long:
# return list_producer (os.listdir (p), 0, None)
# else:
# command = '/bin/ls -l %s' % p
# print 'opening pipe to "%s"' % command
# fd = os.popen (command, 'rt')
# return pipe_channel (fd)
#
# # this is both a dispatcher, _and_ a producer
# class pipe_channel (asyncore.file_dispatcher):
# buffer_size = 4096
#
# def __init__ (self, fd):
# asyncore.file_dispatcher.__init__ (self, fd)
# self.fd = fd
# self.done = 0
# self.data = ''
#
# def handle_read (self):
# if len (self.data) < self.buffer_size:
# self.data = self.data + self.fd.read (self.buffer_size)
# #print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
#
# def handle_expt (self):
# #print '%s.handle_expt()' % self
# self.done = 1
#
# def ready (self):
# #print '%s.ready() => %d' % (self, len(self.data))
# return ((len (self.data) > 0) or self.done)
#
# def more (self):
# if self.data:
# r = self.data
# self.data = ''
# elif self.done:
# self.close()
# self.downstream.finished()
# r = ''
# else:
# r = None
# #print '%s.more() => %s' % (self, (r and len(r)))
# return r
# For the 'real' root, we could obtain a list of drives, and then
# use that. Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]
class msdos_filesystem (os_filesystem):
def longify (self, (path, stat_info)):
return msdos_longify (path, stat_info)
# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
# to be the most general idea. So you'd use a 'mount' method to place
# another filesystem somewhere in the hierarchy.
# Note: this is most likely how I will handle ~user directories
# with the http server.
class merged_filesystem:
def __init__ (self, *fsys):
pass
# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.
def msdos_longify (file, stat_info):
if stat.S_ISDIR (stat_info[stat.ST_MODE]):
dir = '<DIR>'
else:
dir = ' '
date = msdos_date (stat_info[stat.ST_MTIME])
return '%s %s %8d %s' % (
date,
dir,
stat_info[stat.ST_SIZE],
file
)
def msdos_date (t):
    try:
        info = list(time.gmtime (t))  # list() so the hour field can be modified below
    except:
        info = list(time.gmtime (0))
# year, month, day, hour, minute, second, ...
if info[3] > 11:
merid = 'PM'
info[3] = info[3] - 12
else:
merid = 'AM'
return '%02d-%02d-%02d %02d:%02d%s' % (
info[1],
info[2],
info[0]%100,
info[3],
info[4],
merid
)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mode_table = {
'0':'---',
'1':'--x',
'2':'-w-',
'3':'-wx',
'4':'r--',
'5':'r-x',
'6':'rw-',
'7':'rwx'
}
import time
def unix_longify (file, stat_info):
# for now, only pay attention to the lower bits
mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
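    # e.g. mode '755' -> 'rwxr-xr-x'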
mode = string.join (map (lambda x: mode_table[x], mode), '')
if stat.S_ISDIR (stat_info[stat.ST_MODE]):
dirchar = 'd'
else:
dirchar = '-'
date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
return '%s%s %3d %-8d %-8d %8d %s %s' % (
dirchar,
mode,
stat_info[stat.ST_NLINK],
stat_info[stat.ST_UID],
stat_info[stat.ST_GID],
stat_info[stat.ST_SIZE],
date,
file
)
# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
# Oct 19 1995
# otherwise, it looks like this:
# Oct 19 17:33
def ls_date (now, t):
try:
info = time.gmtime (t)
except:
info = time.gmtime (0)
# 15,600,000 == 86,400 * 180
if (now - t) > 15600000:
return '%s %2d %d' % (
months[info[1]-1],
info[2],
info[0]
)
else:
return '%s %2d %02d:%02d' % (
months[info[1]-1],
info[2],
info[3],
info[4]
)
# ===========================================================================
# Producers
# ===========================================================================
class list_producer:
def __init__ (self, file_list, long, longify):
self.file_list = file_list
self.long = long
self.longify = longify
self.done = 0
    def ready (self):
        if len(self.file_list):
            return 1
        else:
            if not self.done:
                self.done = 1
            return 0
# this should do a pushd/popd
def more (self):
if not self.file_list:
return ''
else:
# do a few at a time
bunch = self.file_list[:50]
if self.long:
bunch = map (self.longify, bunch)
self.file_list = self.file_list[50:]
return string.joinfields (bunch, '\r\n') + '\r\n'
| gpl-2.0 | -7,760,936,704,069,886,000 | 27.416309 | 88 | 0.518577 | false | 3.653974 | false | false | false |
klette/pyroutes | pyroutes/http/request.py | 1 | 5102 | # encoding: utf-8
"""
This module contains only the Request class, a key class in pyroutes. Request
objects hold all meta about incoming requests.
"""
from cgi import parse_qsl, FieldStorage
from pyroutes.http.cookies import RequestCookieHandler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Request(object):
"""
The pyroutes Request object.
Contains all information about a request,
like GET/POST and environment data.
"""
def __init__(self, environment):
self.GET = {}
self.POST = {}
self.PUT = {}
self.FILES = {}
self.ENV = environment
self.extract_get_data()
self.extract_post_data()
self.extract_put_data()
self.COOKIES = RequestCookieHandler(environment)
self.params = {}
self.matched_path = None
def __repr__(self):
values = (self.GET, self.POST, self.PUT, self.COOKIES,
self.FILES.keys())
return "GET: %s\nPOST: %s\nPUT: %s\nCOOKIES: %s\nFILES: %s" % values
def extract_put_data(self):
"""Extracts the file pointer from a PUT request.
The PUT method allows you to write the contents of the file to the
socket connection that is established with the server directly.
According to the [HTTP/1.1 specification (RFC2616)][0], the server
must return a status code of 201 (Created) if the file in question
is newly created, and 200 (OK) or 204 (No Content) if the request
results in a successful update.
When using the POST method, all the fields and files are combined
into a single multipart/form-data type object, and this has to be
decoded by the server side handler.
[0]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
"""
if self.ENV.get('REQUEST_METHOD', 'GET') == 'PUT':
if hasattr(self.ENV['wsgi.input'], 'read'):
self.PUT = self.ENV['wsgi.input']
def extract_post_data(self):
"Populates the POST variable"
data = {}
# Copy enviroment so we dont get GET-variables in the result.
env = self.ENV.copy()
env['QUERY_STRING'] = ''
if env.get('REQUEST_METHOD', 'GET') == 'POST':
_data = FieldStorage(
fp=self.ENV['wsgi.input'],
environ=env,
keep_blank_values=False
)
for key in _data.keys():
value = self._parse_field(_data[key], key, _data)
if value is not None:
self._assign_field_to_section(key, value, data)
self.POST = data
def extract_get_data(self):
"Populates the GET variable from environment"
ret_dict = {}
for (key, value) in parse_qsl(self.ENV.get('QUERY_STRING', '')):
if key in ret_dict:
if not isinstance(ret_dict[key], list):
ret_dict[key] = [ret_dict[key]]
ret_dict[key].append(value)
else:
ret_dict[key] = value
self.GET = ret_dict
def _assign_field_to_section(self, key, value, storage):
if isinstance(value, list):
for val in value:
self._assign_field_to_section(key, val, storage)
else:
if (isinstance(value, tuple) and value[1] and
(isinstance(value[1], file) or hasattr(value[1], 'read'))):
# If an existing value exists for this key, convert to
# list-result
if key in self.FILES and \
not isinstance(self.FILES[key], list):
self.FILES[key] = [self.FILES[key]]
if key in self.FILES and isinstance(self.FILES[key], list):
self.FILES[key].append(value)
else:
self.FILES[key] = value
elif isinstance(value, basestring):
# If an existing value exists for this key,
# convert to list-result
if key in storage and not isinstance(storage[key], list):
storage[key] = [storage[key]]
if key in storage and isinstance(storage[key], list):
storage[key].append(value)
else:
storage[key] = value
def _parse_field(self, field, key, data):
value = data.getvalue(key)
if isinstance(field, list):
value = [self._parse_field(f, key, data) for f in field]
elif hasattr(field, 'filename') and field.filename:
if field.file:
value = (field.filename, field.file)
else:
value = (field.filename, StringIO(data.getvalue(key)))
elif isinstance(value, basestring):
try:
value = unicode(value, 'utf-8')
except UnicodeDecodeError:
# If we can't understand the data as utf, try latin1
value = unicode(value, 'iso-8859-1')
return value
| gpl-2.0 | -4,441,000,111,228,115,500 | 33.472973 | 77 | 0.558212 | false | 4.202636 | false | false | false |
tyarkoni/transitions | transitions/extensions/nesting.py | 2 | 49675 | # -*- coding: utf-8 -*-
from collections import OrderedDict, defaultdict
import copy
from functools import partial, reduce
import inspect
import logging
from six import string_types
from ..core import State, Machine, Transition, Event, listify, MachineError, Enum, EnumMeta, EventData
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(logging.NullHandler())
# this is a workaround for dill issues when partials and super is used in conjunction
# without it, Python 3.0 - 3.3 will not support pickling
# https://github.com/pytransitions/transitions/issues/236
_super = super
# converts a hierarchical tree into a list of current states
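# e.g. {'A': {'1': {}, '2': {}}} -> ['A_1', 'A_2'] while {'A': {'1': {}}} -> 'A_1'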
def _build_state_list(state_tree, separator, prefix=[]):
res = []
for key, value in state_tree.items():
if value:
res.append(_build_state_list(value, separator, prefix=prefix + [key]))
else:
res.append(separator.join(prefix + [key]))
return res if len(res) > 1 else res[0]
# custom breadth-first tree exploration
# makes sure that ALL children are evaluated before parents in parallel states
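# e.g. {'A': {'1': {}}, 'B': {}} yields [['A', '1'], ['A'], ['B']] (children first)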
def _resolve_order(state_tree):
s = state_tree
q = []
res = []
p = []
while True:
for k in reversed(list(s.keys())):
pk = p + [k]
res.append(pk)
if s[k]:
q.append((pk, s[k]))
if not q:
break
p, s = q.pop(0)
return reversed(res)
class FunctionWrapper(object):
""" A wrapper to enable transitions' convenience function to_<state> for nested states.
This allows to call model.to_A.s1.C() in case a custom separator has been chosen."""
def __init__(self, func, path):
"""
Args:
func: Function to be called at the end of the path.
path: If path is an empty string, assign function
"""
if path:
self.add(func, path)
self._func = None
else:
self._func = func
def add(self, func, path):
""" Assigns a `FunctionWrapper` as an attribute named like the next segment of the substates
path.
Args:
func (callable): Function to be called at the end of the path.
path (string): Remaining segment of the substate path.
"""
if path:
name = path[0]
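            # attribute names cannot start with a digit; prefix such state
            # names with 's' (e.g. '1' becomes 's1', matching model.to_A.s1.C())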
if name[0].isdigit():
name = 's' + name
if hasattr(self, name):
getattr(self, name).add(func, path[1:])
else:
setattr(self, name, FunctionWrapper(func, path[1:]))
else:
self._func = func
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
class NestedEvent(Event):
""" An event type to work with nested states.
This subclass is NOT compatible with simple Machine instances.
"""
def trigger(self, _model, _machine, *args, **kwargs):
""" Serially execute all transitions that match the current state,
halting as soon as one successfully completes. NOTE: This should only
be called by HierarchicalMachine instances.
Args:
            _model (object): model object on which the event is triggered
_machine (HierarchicalMachine): Since NestedEvents can be used in multiple machine instances, this one
will be used to determine the current state separator.
args and kwargs: Optional positional or named arguments that will
be passed onto the EventData object, enabling arbitrary state
information to be passed on to downstream triggered functions.
Returns: boolean indicating whether or not a transition was
successfully executed (True if successful, False if not).
"""
func = partial(self._trigger, _model, _machine, *args, **kwargs)
# pylint: disable=protected-access
# noinspection PyProtectedMember
# Machine._process should not be called somewhere else. That's why it should not be exposed
# to Machine users.
return _machine._process(func)
def _trigger(self, _model, _machine, *args, **kwargs):
state_tree = _machine._build_state_tree(getattr(_model, _machine.model_attribute), _machine.state_cls.separator)
state_tree = reduce(dict.get, _machine.get_global_name(join=False), state_tree)
ordered_states = _resolve_order(state_tree)
done = set()
res = None
for state_path in ordered_states:
state_name = _machine.state_cls.separator.join(state_path)
if state_name not in done and state_name in self.transitions:
state = _machine.get_state(state_name)
event_data = EventData(state, self, _machine, _model, args=args, kwargs=kwargs)
event_data.source_name = state_name
event_data.source_path = copy.copy(state_path)
res = self._process(event_data)
if res:
elems = state_path
while elems:
done.add(_machine.state_cls.separator.join(elems))
elems.pop()
return res
def _process(self, event_data):
machine = event_data.machine
machine.callbacks(event_data.machine.prepare_event, event_data)
_LOGGER.debug("%sExecuted machine preparation callbacks before conditions.", machine.name)
try:
for trans in self.transitions[event_data.source_name]:
event_data.transition = trans
if trans.execute(event_data):
event_data.result = True
break
except Exception as err:
event_data.error = err
if self.machine.on_exception:
self.machine.callbacks(self.machine.on_exception, event_data)
else:
raise
finally:
try:
machine.callbacks(machine.finalize_event, event_data)
_LOGGER.debug("%sExecuted machine finalize callbacks", machine.name)
except Exception as err:
_LOGGER.error("%sWhile executing finalize callbacks a %s occurred: %s.",
self.machine.name,
type(err).__name__,
str(err))
return event_data.result
class NestedState(State):
""" A state which allows substates.
Attributes:
states (OrderedDict): A list of substates of the current state.
events (dict): A list of events defined for the nested state.
initial (list, str, NestedState or Enum): (Name of a) child or list of children that should be entered when the state is entered.
exit_stack (defaultdict): A list of currently active substates
"""
separator = '_'
u""" Separator between the names of parent and child states. In case '_' is required for
naming state, this value can be set to other values such as '.' or even unicode characters
    such as '↦' (limited to Python 3 though).
"""
def __init__(self, name, on_enter=None, on_exit=None, ignore_invalid_triggers=None, initial=None):
_super(NestedState, self).__init__(name=name, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore_invalid_triggers)
self.initial = initial
self.events = {}
self.states = OrderedDict()
self._scope = []
def add_substate(self, state):
""" Adds a state as a substate.
Args:
state (NestedState): State to add to the current state.
"""
self.add_substates(state)
def add_substates(self, states):
""" Adds a list of states to the current state.
Args:
states (list): List of states to add to the current state.
"""
for state in listify(states):
self.states[state.name] = state
def scoped_enter(self, event_data, scope=[]):
self._scope = scope
try:
self.enter(event_data)
finally:
self._scope = []
def scoped_exit(self, event_data, scope=[]):
self._scope = scope
try:
self.exit(event_data)
finally:
self._scope = []
@property
def name(self):
return self.separator.join(self._scope + [_super(NestedState, self).name])
class NestedTransition(Transition):
""" A transition which handles entering and leaving nested states.
Attributes:
source (str): Source state of the transition.
dest (str): Destination state of the transition.
prepare (list): Callbacks executed before conditions checks.
conditions (list): Callbacks evaluated to determine if
the transition should be executed.
before (list): Callbacks executed before the transition is executed
but only if condition checks have been successful.
after (list): Callbacks executed after the transition is executed
but only if condition checks have been successful.
"""
def _resolve_transition(self, event_data):
machine = event_data.machine
dst_name_path = machine.get_local_name(self.dest, join=False)
_ = machine.get_state(dst_name_path)
model_states = listify(getattr(event_data.model, machine.model_attribute))
state_tree = machine._build_state_tree(model_states, machine.state_cls.separator)
scope = machine.get_global_name(join=False)
src_name_path = event_data.source_path
if src_name_path == dst_name_path:
root = src_name_path[:-1] # exit and enter the same state
dst_name_path = dst_name_path[-1:]
else:
root = []
while dst_name_path and src_name_path and src_name_path[0] == dst_name_path[0]:
root.append(src_name_path.pop(0))
dst_name_path.pop(0)
scoped_tree = reduce(dict.get, scope + root, state_tree)
exit_partials = [partial(machine.get_state(root + state_name).scoped_exit,
event_data, scope + root + state_name[:-1])
for state_name in _resolve_order(scoped_tree)]
if dst_name_path:
new_states, enter_partials = self._enter_nested(root, dst_name_path, scope + root, event_data)
else:
new_states, enter_partials = {}, []
scoped_tree.clear()
for new_key, value in new_states.items():
scoped_tree[new_key] = value
break
return state_tree, exit_partials, enter_partials
def _change_state(self, event_data):
state_tree, exit_partials, enter_partials = self._resolve_transition(event_data)
for func in exit_partials:
func()
self._update_model(event_data, state_tree)
for func in enter_partials:
func()
def _enter_nested(self, root, dest, prefix_path, event_data):
if root:
state_name = root.pop(0)
with event_data.machine(state_name):
return self._enter_nested(root, dest, prefix_path, event_data)
elif dest:
new_states = OrderedDict()
state_name = dest.pop(0)
with event_data.machine(state_name):
new_states[state_name], new_enter = self._enter_nested([], dest, prefix_path + [state_name], event_data)
enter_partials = [partial(event_data.machine.scoped.scoped_enter, event_data, prefix_path)] + new_enter
return new_states, enter_partials
elif event_data.machine.scoped.initial:
new_states = OrderedDict()
enter_partials = []
q = []
prefix = prefix_path
scoped_tree = new_states
initial_names = [i.name if hasattr(i, 'name') else i for i in listify(event_data.machine.scoped.initial)]
initial_states = [event_data.machine.scoped.states[n] for n in initial_names]
while True:
event_data.scope = prefix
for state in initial_states:
enter_partials.append(partial(state.scoped_enter, event_data, prefix))
scoped_tree[state.name] = OrderedDict()
if state.initial:
q.append((scoped_tree[state.name], prefix + [state.name],
[state.states[i.name] if hasattr(i, 'name') else state.states[i]
for i in listify(state.initial)]))
if not q:
break
scoped_tree, prefix, initial_states = q.pop(0)
return new_states, enter_partials
else:
return {}, []
@staticmethod
def _update_model(event_data, tree):
model_states = _build_state_list(tree, event_data.machine.state_cls.separator)
with event_data.machine():
event_data.machine.set_state(model_states, event_data.model)
states = event_data.machine.get_states(listify(model_states))
event_data.state = states[0] if len(states) == 1 else states
# Prevent deep copying of callback lists since these include either references to callable or
# strings. Deep copying a method reference would lead to the creation of an entire new (model) object
# (see https://github.com/pytransitions/transitions/issues/248)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for key, value in self.__dict__.items():
if key in cls.dynamic_methods:
setattr(result, key, copy.copy(value))
else:
setattr(result, key, copy.deepcopy(value, memo))
return result
class HierarchicalMachine(Machine):
""" Extends transitions.core.Machine by capabilities to handle nested states.
A hierarchical machine REQUIRES NestedStates, NestedEvent and NestedTransitions
(or any subclass of it) to operate.
"""
state_cls = NestedState
transition_cls = NestedTransition
event_cls = NestedEvent
def __init__(self, *args, **kwargs):
assert issubclass(self.state_cls, NestedState)
assert issubclass(self.event_cls, NestedEvent)
assert issubclass(self.transition_cls, NestedTransition)
self._stack = []
self.scoped = self
_super(HierarchicalMachine, self).__init__(*args, **kwargs)
def __call__(self, to_scope=None):
if isinstance(to_scope, string_types):
state_name = to_scope.split(self.state_cls.separator)[0]
state = self.states[state_name]
to_scope = (state, state.states, state.events)
elif isinstance(to_scope, Enum):
state = self.states[to_scope.name]
to_scope = (state, state.states, state.events)
elif to_scope is None:
if self._stack:
to_scope = self._stack[0]
else:
to_scope = (self, self.states, self.events)
self._next_scope = to_scope
return self
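    # Scoping usage (sketch): `with machine('A'):` temporarily replaces
    # self.states/self.events with those of state 'A'; exiting restores the
    # previous scope. Scopes nest via the internal stack below.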
def __enter__(self):
self._stack.append((self.scoped, self.states, self.events))
self.scoped, self.states, self.events = self._next_scope
self._next_scope = None
def __exit__(self, exc_type, exc_val, exc_tb):
self.scoped, self.states, self.events = self._stack.pop()
def add_model(self, model, initial=None):
""" Extends transitions.core.Machine.add_model by applying a custom 'to' function to
the added model.
"""
models = [mod if mod != 'self' else self for mod in listify(model)]
_super(HierarchicalMachine, self).add_model(models, initial=initial)
initial_name = getattr(models[0], self.model_attribute)
if hasattr(initial_name, 'name'):
initial_name = initial_name.name
# initial states set by add_model or machine might contain initial states themselves.
if isinstance(initial_name, string_types):
initial_states = self._resolve_initial(models, initial_name.split(self.state_cls.separator))
# when initial is set to a (parallel) state, we accept it as it is
else:
initial_states = initial_name
for mod in models:
self.set_state(initial_states, mod)
if hasattr(mod, 'to'):
_LOGGER.warning("%sModel already has a 'to'-method. It will NOT "
"be overwritten by NestedMachine", self.name)
else:
to_func = partial(self.to_state, mod)
setattr(mod, 'to', to_func)
@property
def initial(self):
""" Return the initial state. """
return self._initial
@initial.setter
def initial(self, value):
self._initial = self._recursive_initial(value)
def add_ordered_transitions(self, states=None, trigger='next_state',
loop=True, loop_includes_initial=True,
conditions=None, unless=None, before=None,
after=None, prepare=None, **kwargs):
if states is None:
states = self.get_nested_state_names()
_super(HierarchicalMachine, self).add_ordered_transitions(states=states, trigger=trigger, loop=loop,
loop_includes_initial=loop_includes_initial,
conditions=conditions,
unless=unless, before=before, after=after,
prepare=prepare, **kwargs)
def add_states(self, states, on_enter=None, on_exit=None, ignore_invalid_triggers=None, **kwargs):
""" Add new nested state(s).
Args:
states (list, str, dict, Enum, NestedState or Machine): a list, a NestedState instance, the
name of a new state, an enumeration (member) or a dict with keywords to pass on to the
NestedState initializer. If a list, each element can be a string, dict, NestedState or
enumeration member.
on_enter (str or list): callbacks to trigger when the state is
entered. Only valid if first argument is string.
on_exit (str or list): callbacks to trigger when the state is
exited. Only valid if first argument is string.
ignore_invalid_triggers: when True, any calls to trigger methods
that are not valid for the present state (e.g., calling an
a_to_b() trigger when the current state is c) will be silently
ignored rather than raising an invalid transition exception.
Note that this argument takes precedence over the same
argument defined at the Machine level, and is in turn
overridden by any ignore_invalid_triggers explicitly
passed in an individual state's initialization arguments.
**kwargs additional keyword arguments used by state mixins.
"""
remap = kwargs.pop('remap', None)
ignore = self.ignore_invalid_triggers if ignore_invalid_triggers is None else ignore_invalid_triggers
for state in listify(states):
if isinstance(state, Enum):
if isinstance(state.value, EnumMeta):
state = {'name': state, 'children': state.value}
elif isinstance(state.value, dict):
state = dict(name=state, **state.value)
if isinstance(state, string_types):
if remap is not None and state in remap:
return
domains = state.split(self.state_cls.separator, 1)
if len(domains) > 1:
try:
self.get_state(domains[0])
except ValueError:
self.add_state(domains[0], on_enter=on_enter, on_exit=on_exit, ignore_invalid_triggers=ignore_invalid_triggers, **kwargs)
with self(domains[0]):
self.add_states(domains[1], on_enter=on_enter, on_exit=on_exit, ignore_invalid_triggers=ignore_invalid_triggers, **kwargs)
else:
if state in self.states:
raise ValueError("State {0} cannot be added since it already exists.".format(state))
new_state = self._create_state(state, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore, **kwargs)
self.states[new_state.name] = new_state
self._init_state(new_state)
elif isinstance(state, Enum):
if self.state_cls.separator in state.name:
raise ValueError("State '{0}' contains '{1}' which is used as state name separator. "
"Consider changing the NestedState.separator to avoid this issue."
"".format(state.name, self.state_cls.separator))
if remap is not None and state.name in remap:
return
new_state = self._create_state(state, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore, **kwargs)
if state.name in self.states:
raise ValueError("State {0} cannot be added since it already exists.".format(state.name))
self.states[new_state.name] = new_state
self._init_state(new_state)
elif isinstance(state, dict):
if remap is not None and state['name'] in remap:
return
state = state.copy() # prevent messing with the initially passed dict
remap = state.pop('remap', None)
if 'ignore_invalid_triggers' not in state:
state['ignore_invalid_triggers'] = ignore
# parallel: [states] is just a short handle for {children: [states], initial: [state_names]}
state_parallel = state.pop('parallel', [])
if state_parallel:
state_children = state_parallel
state['initial'] = [s['name'] if isinstance(s, dict)
else s for s in state_children]
else:
state_children = state.pop('children', state.pop('states', []))
transitions = state.pop('transitions', [])
new_state = self._create_state(**state)
self.states[new_state.name] = new_state
self._init_state(new_state)
remapped_transitions = []
with self(new_state.name):
self.add_states(state_children, remap=remap, **kwargs)
if remap is not None:
drop_event = []
for evt in self.events.values():
self.events[evt.name] = copy.copy(evt)
for trigger, event in self.events.items():
drop_source = []
event.transitions = copy.deepcopy(event.transitions)
for source_name, trans_source in event.transitions.items():
if source_name in remap:
drop_source.append(source_name)
continue
drop_trans = []
for trans in trans_source:
if trans.dest in remap:
conditions, unless = [], []
for cond in trans.conditions:
# split a list in two lists based on the accessors (cond.target) truth value
(unless, conditions)[cond.target].append(cond.func)
remapped_transitions.append({
'trigger': trigger,
'source': new_state.name + self.state_cls.separator + trans.source,
'dest': remap[trans.dest],
'conditions': conditions,
'unless': unless,
'prepare': trans.prepare,
'before': trans.before,
'after': trans.after})
drop_trans.append(trans)
for t in drop_trans:
trans_source.remove(t)
if not trans_source:
drop_source.append(source_name)
for s in drop_source:
del event.transitions[s]
if not event.transitions:
drop_event.append(trigger)
for e in drop_event:
del self.events[e]
if transitions:
self.add_transitions(transitions)
self.add_transitions(remapped_transitions)
elif isinstance(state, NestedState):
if state.name in self.states:
raise ValueError("State {0} cannot be added since it already exists.".format(state.name))
self.states[state.name] = state
self._init_state(state)
elif isinstance(state, Machine):
new_states = [s for s in state.states.values() if remap is None or s not in remap]
self.add_states(new_states)
for ev in state.events.values():
self.events[ev.name] = ev
if self.scoped.initial is None:
self.scoped.initial = state.initial
elif isinstance(state, State) and not isinstance(state, NestedState):
raise ValueError("A passed state object must derive from NestedState! "
"A default State object is not sufficient")
else:
raise ValueError("Cannot add state of type {0}. ".format(type(state).__name__))
def add_transition(self, trigger, source, dest, conditions=None,
unless=None, before=None, after=None, prepare=None, **kwargs):
if source != self.wildcard_all:
source = [self.state_cls.separator.join(self._get_enum_path(s)) if isinstance(s, Enum) else s
for s in listify(source)]
if dest != self.wildcard_same:
dest = self.state_cls.separator.join(self._get_enum_path(dest)) if isinstance(dest, Enum) else dest
_super(HierarchicalMachine, self).add_transition(trigger, source, dest, conditions,
unless, before, after, prepare, **kwargs)
def get_global_name(self, state=None, join=True):
local_stack = [s[0] for s in self._stack] + [self.scoped]
local_stack_start = len(local_stack) - local_stack[::-1].index(self)
domains = [s.name for s in local_stack[local_stack_start:]]
if state:
state_name = state.name if hasattr(state, 'name') else state
if state_name in self.states:
domains.append(state_name)
else:
raise ValueError("State '{0}' not found in local states.".format(state))
return self.state_cls.separator.join(domains) if join else domains
def get_local_name(self, state_name, join=True):
state_name = state_name.split(self.state_cls.separator)
local_stack = [s[0] for s in self._stack] + [self.scoped]
local_stack_start = len(local_stack) - local_stack[::-1].index(self)
domains = [s.name for s in local_stack[local_stack_start:]]
if domains and state_name and state_name[0] != domains[0]:
return self.state_cls.separator.join(state_name) if join else state_name
return self.state_cls.separator.join(state_name) if join else state_name
def get_nested_state_names(self):
ordered_states = []
for state in self.states.values():
ordered_states.append(self.get_global_name(state))
with self(state.name):
ordered_states.extend(self.get_nested_state_names())
return ordered_states
def get_nested_transitions(self, trigger="", src_path=None, dest_path=None):
if src_path and dest_path:
src = self.state_cls.separator.join(src_path)
dest = self.state_cls.separator.join(dest_path)
transitions = _super(HierarchicalMachine, self).get_transitions(trigger, src, dest)
if len(src_path) > 1 and len(dest_path) > 1:
with self(src_path[0]):
transitions.extend(self.get_nested_transitions(trigger, src_path[1:], dest_path[1:]))
elif src_path:
src = self.state_cls.separator.join(src_path)
transitions = _super(HierarchicalMachine, self).get_transitions(trigger, src, "*")
if len(src_path) > 1:
with self(src_path[0]):
transitions.extend(self.get_nested_transitions(trigger, src_path[1:], None))
elif dest_path:
dest = self.state_cls.separator.join(dest_path)
transitions = _super(HierarchicalMachine, self).get_transitions(trigger, "*", dest)
if len(dest_path) > 1:
for state_name in self.states:
with self(state_name):
transitions.extend(self.get_nested_transitions(trigger, None, dest_path[1:]))
else:
transitions = _super(HierarchicalMachine, self).get_transitions(trigger, "*", "*")
for state_name in self.states:
with self(state_name):
transitions.extend(self.get_nested_transitions(trigger, None, None))
return transitions
def get_nested_triggers(self, src_path=None):
if src_path:
triggers = _super(HierarchicalMachine, self).get_triggers(self.state_cls.separator.join(src_path))
if len(src_path) > 1 and src_path[0] in self.states:
with self(src_path[0]):
triggers.extend(self.get_nested_triggers(src_path[1:]))
else:
triggers = list(self.events.keys())
for state_name in self.states:
with self(state_name):
triggers.extend(self.get_nested_triggers())
return triggers
def get_state(self, state, hint=None):
""" Return the State instance with the passed name. """
if isinstance(state, Enum):
state = self._get_enum_path(state)
elif isinstance(state, string_types):
state = state.split(self.state_cls.separator)
if not hint:
state = copy.copy(state)
hint = copy.copy(state)
if len(state) > 1:
child = state.pop(0)
try:
with self(child):
return self.get_state(state, hint)
except (KeyError, ValueError):
try:
with self():
state = self
for elem in hint:
state = state.states[elem]
return state
except KeyError:
raise ValueError("State '%s' is not a registered state." % self.state_cls.separator.join(hint))
elif state[0] not in self.states:
raise ValueError("State '%s' is not a registered state." % state)
return self.states[state[0]]
def get_states(self, states):
res = []
for state in states:
if isinstance(state, list):
res.append(self.get_states(state))
else:
res.append(self.get_state(state))
return res
def get_transitions(self, trigger="", source="*", dest="*", delegate=False):
""" Return the transitions from the Machine.
Args:
trigger (str): Trigger name of the transition.
source (str, State or Enum): Limits list to transitions from a certain state.
dest (str, State or Enum): Limits list to transitions to a certain state.
delegate (Optional[bool]): If True, consider delegations to parents of source
"""
with self():
source_path = [] if source == "*" \
else source.split(self.state_cls.separator) if isinstance(source, string_types) \
else self._get_enum_path(source) if isinstance(source, Enum) \
else self._get_state_path(source)
dest_path = [] if dest == "*" \
else dest.split(self.state_cls.separator) if isinstance(dest, string_types) \
else self._get_enum_path(dest) if isinstance(dest, Enum) \
else self._get_state_path(dest)
matches = self.get_nested_transitions(trigger, source_path, dest_path)
# only consider delegations when source_path contains a nested state (len > 1)
if delegate is False or len(source_path) < 2:
return matches
source_path.pop()
while source_path:
matches.extend(self.get_transitions(trigger,
source=self.state_cls.separator.join(source_path),
dest=dest))
source_path.pop()
return matches
def get_triggers(self, *args):
""" Extends transitions.core.Machine.get_triggers to also include parent state triggers. """
triggers = []
with self():
for state in args:
state_name = state.name if hasattr(state, 'name') else state
state_path = state_name.split(self.state_cls.separator)
if len(state_path) > 1: # we only need to check substates when 'state_name' refers to a substate
with self(state_path[0]):
triggers.extend(self.get_nested_triggers(state_path[1:]))
while state_path: # check all valid transitions for parent states
triggers.extend(_super(HierarchicalMachine, self).get_triggers(self.state_cls.separator.join(state_path)))
state_path.pop()
return triggers
def has_trigger(self, trigger, state=None):
""" Check whether an event/trigger is known to the machine
Args:
trigger (str): Event/trigger name
state (optional[NestedState]): Limits the recursive search to this state and its children
Returns:
bool: True if event is known and False otherwise
"""
state = state or self
return trigger in state.events or any([self.has_trigger(trigger, sta) for sta in state.states.values()])
def is_state(self, state_name, model, allow_substates=False):
current_name = getattr(model, self.model_attribute)
if allow_substates:
if isinstance(current_name, Enum):
current_name = self.state_cls.separator.join(self._get_enum_path(current_name))
if isinstance(state_name, Enum):
state_name = self.state_cls.separator.join(self._get_enum_path(state_name))
return current_name.startswith(state_name)
return current_name == state_name
def on_enter(self, state_name, callback):
""" Helper function to add callbacks to states in case a custom state separator is used.
Args:
state_name (str): Name of the state
callback (str or callable): Function to be called. Strings will be resolved to model functions.
"""
self.get_state(state_name).add_callback('enter', callback)
def on_exit(self, state_name, callback):
""" Helper function to add callbacks to states in case a custom state separator is used.
Args:
state_name (str): Name of the state
callback (str or callable): Function to be called. Strings will be resolved to model functions.
"""
self.get_state(state_name).add_callback('exit', callback)
def set_state(self, states, model=None):
""" Set the current state.
Args:
states (list of str or Enum or State): value of state(s) to be set
model (optional[object]): targeted model; if not set, all models will be set to 'state'
"""
values = [self._set_state(value) for value in listify(states)]
models = self.models if model is None else listify(model)
for mod in models:
setattr(mod, self.model_attribute, values if len(values) > 1 else values[0])
def to_state(self, model, state_name, *args, **kwargs):
""" Helper function to add go to states in case a custom state separator is used.
Args:
model (class): The model that should be used.
state_name (str): Name of the destination state.
"""
current_state = getattr(model, self.model_attribute)
if isinstance(current_state, list):
raise MachineError("Cannot use 'to_state' from parallel state")
event = EventData(self.get_state(current_state), Event('to', self), self,
model, args=args, kwargs=kwargs)
if isinstance(current_state, Enum):
event.source_path = self._get_enum_path(current_state)
event.source_name = self.state_cls.separator.join(event.source_path)
else:
event.source_name = current_state
event.source_path = current_state.split(self.state_cls.separator)
self._create_transition(event.source_name, state_name).execute(event)
def trigger_event(self, _model, _trigger, *args, **kwargs):
""" Processes events recursively and forwards arguments if suitable events are found.
This function is usually bound to models with model and trigger arguments already
resolved as a partial. Execution will halt when a nested transition has been executed
successfully.
Args:
_model (object): targeted model
_trigger (str): event name
*args: positional parameters passed to the event and its callbacks
**kwargs: keyword arguments passed to the event and its callbacks
Returns:
bool: whether a transition has been executed successfully
Raises:
MachineError: When no suitable transition could be found and ignore_invalid_trigger
is not True. Note that a transition which is not executed due to conditions
is still considered valid.
"""
with self():
res = self._trigger_event(_model, _trigger, None, *args, **kwargs)
return self._check_event_result(res, _model, _trigger)
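
    # Hedged usage note: models normally reach this method through the bound
    # trigger functions created by _add_trigger_to_model (e.g. `model.wake_up()`);
    # a direct call looks like
    #
    #     machine.trigger_event(model, 'wake_up')  # True if a transition ran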
def _add_model_to_state(self, state, model):
name = self.get_global_name(state)
if self.state_cls.separator == '_':
value = state.value if isinstance(state.value, Enum) else name
self._checked_assignment(model, 'is_%s' % name, partial(self.is_state, value, model))
# Add dynamic method callbacks (enter/exit) if there are existing bound methods in the model
# except if they are already mentioned in 'on_enter/exit' of the defined state
for callback in self.state_cls.dynamic_methods:
method = "{0}_{1}".format(callback, name)
if hasattr(model, method) and inspect.ismethod(getattr(model, method)) and \
method not in getattr(state, callback):
state.add_callback(callback[3:], method)
else:
path = name.split(self.state_cls.separator)
value = state.value if isinstance(state.value, Enum) else name
trig_func = partial(self.is_state, value, model)
if hasattr(model, 'is_' + path[0]):
getattr(model, 'is_' + path[0]).add(trig_func, path[1:])
else:
self._checked_assignment(model, 'is_' + path[0], FunctionWrapper(trig_func, path[1:]))
with self(state.name):
for event in self.events.values():
if not hasattr(model, event.name):
self._add_trigger_to_model(event.name, model)
for state in self.states.values():
self._add_model_to_state(state, model)
def _add_trigger_to_model(self, trigger, model):
trig_func = partial(self.trigger_event, model, trigger)
# FunctionWrappers are only necessary if a custom separator is used
if trigger.startswith('to_') and self.state_cls.separator != '_':
path = trigger[3:].split(self.state_cls.separator)
if hasattr(model, 'to_' + path[0]):
# add path to existing function wrapper
getattr(model, 'to_' + path[0]).add(trig_func, path[1:])
else:
# create a new function wrapper
self._checked_assignment(model, 'to_' + path[0], FunctionWrapper(trig_func, path[1:]))
else:
self._checked_assignment(model, trigger, trig_func)
# converts a list of current states into a hierarchical state tree
def _build_state_tree(self, model_states, separator, tree=None):
tree = tree if tree is not None else OrderedDict()
if isinstance(model_states, list):
for state in model_states:
_ = self._build_state_tree(state, separator, tree)
else:
tmp = tree
if isinstance(model_states, (Enum, EnumMeta)):
with self():
path = self._get_enum_path(model_states)
else:
path = model_states.split(separator)
for elem in path:
tmp = tmp.setdefault(elem.name if hasattr(elem, 'name') else elem, OrderedDict())
return tree
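
    # Hedged example of the returned shape: for the parallel model state
    # ['A_1', 'B_2'] with separator '_', _build_state_tree produces
    #
    #     OrderedDict([('A', OrderedDict([('1', OrderedDict())])),
    #                  ('B', OrderedDict([('2', OrderedDict())]))])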
def _get_enum_path(self, enum_state, prefix=[]):
if enum_state.name in self.states and self.states[enum_state.name].value == enum_state:
return prefix + [enum_state.name]
for name in self.states:
with self(name):
res = self._get_enum_path(enum_state, prefix=prefix + [name])
if res:
return res
return []
def _get_state_path(self, state, prefix=[]):
if state in self.states.values():
return prefix + [state.name]
for name in self.states:
with self(name):
res = self._get_state_path(state, prefix=prefix + [name])
if res:
return res
return []
def _check_event_result(self, res, model, trigger):
if res is None:
state_names = getattr(model, self.model_attribute)
msg = "%sCan't trigger event '%s' from state(s) %s!" % (self.name, trigger, state_names)
for state_name in listify(state_names):
state = self.get_state(state_name)
ignore = state.ignore_invalid_triggers if state.ignore_invalid_triggers is not None \
else self.ignore_invalid_triggers
if not ignore:
# determine whether a MachineError (valid event but invalid state) ...
if self.has_trigger(trigger):
raise MachineError(msg)
# or AttributeError (invalid event) is appropriate
else:
raise AttributeError("Do not know event named '%s'." % trigger)
_LOGGER.warning(msg)
res = False
return res
def _get_trigger(self, model, trigger_name, *args, **kwargs):
"""Convenience function added to the model to trigger events by name.
Args:
model (object): Model with assigned event trigger.
trigger_name (str): Name of the trigger to be called.
*args: Variable length argument list which is passed to the triggered event.
            **kwargs: Arbitrary keyword arguments which are passed to the triggered event.
        Returns:
            bool: True if a transition has been conducted or the trigger event has been queued.
"""
return self.trigger_event(model, trigger_name, *args, **kwargs)
def _has_state(self, state, raise_error=False):
""" This function
Args:
state (NestedState): state to be tested
raise_error (bool): whether ValueError should be raised when the state
is not registered
Returns:
bool: Whether state is registered in the machine
Raises:
ValueError: When raise_error is True and state is not registered
"""
found = _super(HierarchicalMachine, self)._has_state(state)
if not found:
for a_state in self.states:
with self(a_state):
if self._has_state(state):
return True
if not found and raise_error:
msg = 'State %s has not been added to the machine' % (state.name if hasattr(state, 'name') else state)
raise ValueError(msg)
return found
def _init_state(self, state):
for model in self.models:
self._add_model_to_state(state, model)
if self.auto_transitions:
state_name = self.get_global_name(state.name)
parent = state_name.split(self.state_cls.separator, 1)
with self():
for a_state in self.get_nested_state_names():
if a_state == parent[0]:
self.add_transition('to_%s' % state_name, self.wildcard_all, state_name)
elif len(parent) == 1:
self.add_transition('to_%s' % a_state, state_name, a_state)
with self(state.name):
for substate in self.states.values():
self._init_state(substate)
def _recursive_initial(self, value):
if isinstance(value, string_types):
path = value.split(self.state_cls.separator, 1)
if len(path) > 1:
state_name, suffix = path
# make sure the passed state has been created already
_super(HierarchicalMachine, self.__class__).initial.fset(self, state_name)
with self(state_name):
self.initial = suffix
self._initial = state_name + self.state_cls.separator + self._initial
else:
_super(HierarchicalMachine, self.__class__).initial.fset(self, value)
elif isinstance(value, (list, tuple)):
return [self._recursive_initial(v) for v in value]
else:
_super(HierarchicalMachine, self.__class__).initial.fset(self, value)
return self._initial[0] if isinstance(self._initial, list) and len(self._initial) == 1 else self._initial
def _resolve_initial(self, models, state_name_path, prefix=[]):
if state_name_path:
state_name = state_name_path.pop(0)
with self(state_name):
return self._resolve_initial(models, state_name_path, prefix=prefix + [state_name])
if self.scoped.initial:
entered_states = []
for initial_state_name in listify(self.scoped.initial):
with self(initial_state_name):
entered_states.append(self._resolve_initial(models, [], prefix=prefix + [self.scoped.name]))
return entered_states if len(entered_states) > 1 else entered_states[0]
return self.state_cls.separator.join(prefix)
def _set_state(self, state_name):
if isinstance(state_name, list):
return [self._set_state(value) for value in state_name]
else:
a_state = self.get_state(state_name)
return a_state.value if isinstance(a_state.value, Enum) else state_name
def _trigger_event(self, _model, _trigger, _state_tree, *args, **kwargs):
if _state_tree is None:
_state_tree = self._build_state_tree(listify(getattr(_model, self.model_attribute)),
self.state_cls.separator)
res = {}
for key, value in _state_tree.items():
if value:
with self(key):
tmp = self._trigger_event(_model, _trigger, value, *args, **kwargs)
if tmp is not None:
res[key] = tmp
if res.get(key, False) is False and _trigger in self.events:
tmp = self.events[_trigger].trigger(_model, self, *args, **kwargs)
if tmp is not None:
res[key] = tmp
return None if not res or all(v is None for v in res.values()) else any(res.values())
| mit | -5,864,972,012,769,849,000 | 47.746811 | 146 | 0.565136 | false | 4.422454 | false | false | false |
glu10/trough | mainWindow.py | 1 | 7901 | """
Trough - a GTK+ RSS news reader
Copyright (C) 2015 Andrew Asp
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see {http://www.gnu.org/licenses/}.
Trough homepage: https://github.com/glu10/trough
"""
from math import floor
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gdk, GLib, Gtk
from cache import Cache
from feedDialog import FeedDialog
from gatherer import Gatherer
from newsStore import NewsStore
from newsView import NewsView
from preferences import Preferences
from preferencesWindow import PreferencesWindow
from stubGatherer import StubGatherer
from threePaneView import ThreePaneView
from twoPaneView import TwoPaneView
from utilityFunctions import make_button
class MainWindow(Gtk.Window):
__gtype_name__ = 'TroughWindow'
def __init__(self, preferences: Preferences, cache: Cache, **kwargs):
Gtk.Window.__init__(self, **kwargs)
self.preferences = preferences
self.cache = cache
self.news_store = NewsStore()
# self.gatherer = StubGatherer(self.news_store)
self.gatherer = Gatherer(self.news_store)
self.connect_signals()
self.prepare_appearance()
self.css_provider = self.create_css()
self.current_view = None
self.switch_view(self.news_store, self.preferences)
def connect_signals(self) -> None:
self.connect('key_press_event', self.on_key_press)
def prepare_appearance(self) -> None:
self.set_good_default_size()
self.set_window_icon()
self.create_header()
def set_good_default_size(self) -> None:
screen = self.get_screen()
active_window = screen.get_active_window()
if active_window:
monitor = screen.get_monitor_at_window(active_window)
geometry = screen.get_monitor_geometry(monitor)
width = floor(.60 * geometry.width)
height = floor(.75 * geometry.height)
self.set_default_size(width, height)
else:
# Guess a reasonable size
self.set_default_size(600, 800)
self.set_size_request(100, 100) # Minimum size
def set_window_icon(self) -> None:
"""
Attempts to find a generic RSS icon in the user's GTK theme
and associates it with the program if found.
"""
try:
theme = Gtk.IconTheme.get_default()
icon = theme.lookup_icon(
'rss',
32,
Gtk.IconLookupFlags.GENERIC_FALLBACK)
if icon:
icon = icon.load_icon()
self.set_icon(icon)
except GLib.GError: # No RSS icon found
pass
def switch_view(self, news_store: NewsStore, preferences: Preferences) -> NewsView:
"""
Activates the view currently chosen in the preferences and returns it.
"""
appearance_prefs = preferences.appearance_preferences()
view_key = appearance_prefs['View']
views = {'Two-Pane': TwoPaneView, 'Three-Pane': ThreePaneView}
view_class = TwoPaneView # views[view_key] # FIXME: Hardcoded for development
if type(self.current_view) != view_class: # Ensure not switching to same view.
if self.current_view:
self.current_view.destroy_display()
self.current_view = view_class(news_store, appearance_prefs)
self.add(self.current_view.top_level())
self.show_all()
self.get_preferred_size() # TODO: Investigate if still needed.
return self.current_view
def create_css(self) -> Gtk.CssProvider:
css_provider = Gtk.CssProvider()
css_provider.load_from_data(self.preferences.get_appearance_css())
context = Gtk.StyleContext()
context.add_provider_for_screen(
self.get_screen(),
css_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
return css_provider
def update_css(self) -> None:
self.css_provider.load_from_data(self.preferences.get_appearance_css())
def create_header(self) -> Gtk.HeaderBar:
header_bar = Gtk.HeaderBar(show_close_button=True)
header_bar.pack_start(self.create_add_button())
header_bar.pack_start(self.create_preferences_button())
header_bar.pack_start(self.create_refresh_button())
self.set_titlebar(header_bar)
return header_bar
def create_add_button(self) -> Gtk.Button:
add_button = make_button(
theme_icon_string='add',
tooltip='Quickly add a feed',
signal='clicked',
signal_func=self.on_add_clicked)
return add_button
def create_preferences_button(self) -> Gtk.Button:
preferences_button = make_button(
theme_icon_string='gtk-preferences',
backup_icon_string='preferences-system',
tooltip='Preferences',
signal='clicked',
signal_func=self.on_preferences_clicked)
return preferences_button
def create_refresh_button(self) -> Gtk.Button:
refresh_button = make_button(
theme_icon_string='view-refresh',
tooltip='Refresh',
signal='clicked',
signal_func=self.on_refresh_clicked)
refresh_button.set_focus_on_click(False)
return refresh_button
def on_add_clicked(self, widget: Gtk.Widget = None) -> None:
dialog = FeedDialog(self, self.preferences.feeds())
feed = dialog.get_response()
if feed:
self.preferences.add_feed(feed)
self.on_refresh_clicked() # Do a convenience refresh
def on_preferences_clicked(self, widget: Gtk.Widget = None) -> None:
pw = PreferencesWindow(self, self.preferences, self.cache)
response = pw.run()
if response == Gtk.ResponseType.OK:
pw.apply_choices()
pw.destroy()
def on_refresh_clicked(self, widget: Gtk.Widget = None) -> None:
"""
Goal:
1. Take each feed URI and look for current items.
2. Only scrape item URIs not in cache.
"""
'''
self.news_store.clear()
self.gatherer.request(None)
'''
for feed in self.preferences.feeds().values():
self.gatherer.request(feed)
@staticmethod
def do_scroll(widget: Gtk.Widget, scroll: Gtk.ScrollType) -> None:
try:
widget.do_scroll_child(widget, scroll, False)
except AttributeError:
pass
def on_key_press(self, widget: Gtk.Widget, event: Gdk.EventKey) -> None:
key = Gdk.keyval_name(event.keyval)
if key == 'F5':
self.on_refresh_clicked()
elif key == 'Left':
self.current_view.change_position(-1)
elif key == 'Right':
self.current_view.change_position(1)
elif key == 'Up':
self.do_scroll(widget, Gtk.ScrollType.STEP_BACKWARD)
elif key == 'Down':
self.do_scroll(widget, Gtk.ScrollType.STEP_FORWARD)
elif key == 'Return':
if event.state & Gdk.ModifierType.CONTROL_MASK:
self.current_view.get_then_open_link()
else:
self.current_view.change_position(0)
else:
pass
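
# Hedged launcher sketch (not part of this file; Trough's real entry point lives
# elsewhere). Assumes Preferences() and Cache() can be constructed without
# arguments:
#
#     if __name__ == '__main__':
#         win = MainWindow(Preferences(), Cache())
#         win.connect('delete-event', Gtk.main_quit)
#         win.show_all()
#         Gtk.main()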
| gpl-3.0 | -6,444,048,257,782,608,000 | 34.751131 | 87 | 0.619542 | false | 3.974346 | false | false | false |
openqt/algorithms | leetcode/python/ac/lc844-backspace-string-compare.py | 1 | 1884 | # coding=utf-8
import unittest
"""844. Backspace String Compare
https://leetcode.com/problems/backspace-string-compare/description/
Given two strings `S` and `T`, return if they are equal when both are typed
into empty text editors. `#` means a backspace character.
**Example 1:**
**Input:** S = "ab#c", T = "ad#c"
**Output:** true
**Explanation** : Both S and T become "ac".
**Example 2:**
**Input:** S = "ab##", T = "c#d#"
**Output:** true
**Explanation** : Both S and T become "".
**Example 3:**
**Input:** S = "a##c", T = "#a#c"
**Output:** true
**Explanation** : Both S and T become "c".
**Example 4:**
**Input:** S = "a#c", T = "b"
**Output:** false
**Explanation** : S becomes "c" while T becomes "b".
**Note** :
1. `1 <= S.length <= 200`
2. `1 <= T.length <= 200`
3. `S` and `T` only contain lowercase letters and `'#'` characters.
**Follow up:**
* Can you solve it in `O(N)` time and `O(1)` space?
Similar Questions:
"""
class Solution(object):
def backspaceCompare(self, S, T):
"""
:type S: str
:type T: str
:rtype: bool
"""
return self._reduce(S) == self._reduce(T)
def _reduce(self, S):
val = []
for i in S:
if i != '#':
val.append(i)
elif val:
val.pop()
return ''.join(val)
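
# Hedged sketch for the follow-up (O(N) time, O(1) extra space); the function
# and helper names below are mine, not part of the original submission.
def backspaceCompareConstantSpace(S, T):
    def next_valid(s, i):
        # Walk left from index i, consuming '#' and the characters they delete;
        # return the index of the next surviving character, or -1.
        skip = 0
        while i >= 0:
            if s[i] == '#':
                skip += 1
            elif skip > 0:
                skip -= 1
            else:
                break
            i -= 1
        return i

    i, j = len(S) - 1, len(T) - 1
    while i >= 0 or j >= 0:
        i, j = next_valid(S, i), next_valid(T, j)
        if i >= 0 and j >= 0:
            if S[i] != T[j]:
                return False
        elif i >= 0 or j >= 0:  # one string ran out before the other
            return False
        i, j = i - 1, j - 1
    return True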
class T(unittest.TestCase):
def test(self):
s = Solution()
self.assertTrue(s.backspaceCompare("mp", "mu#p"))
# self.assertTrue(s.backspaceCompare("ab#c", "ad#c"))
# self.assertTrue(s.backspaceCompare("ab##", "c#d#"))
# self.assertTrue(s.backspaceCompare("a##c", "#a#c"))
# self.assertFalse(s.backspaceCompare("a#c", "b"))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -7,044,472,968,458,829,000 | 19.478261 | 75 | 0.514862 | false | 3.276522 | false | false | false |
jantman/reviewboard-scripts | submit_review.py | 1 | 2979 | #!/usr/bin/env python
"""
A script using the ReviewBoard API Client
<https://pypi.python.org/pypi/RBTools/0.2>
<http://www.reviewboard.org/docs/rbtools/dev/api/>
to submit the review for a specific branch, at a specific commit.
requires:
rbtools
GitPython
"""
from rbtools.api.client import RBClient
import optparse
import sys
import datetime
import re
import subprocess
from puppetconfig import RB_USER, RB_PASSWORD
from rbhelpers import get_reviews_for_branch, get_repository_id_by_name
if __name__ == '__main__':
# if the program is executed directly parse the command line options
# and read the text to paste from stdin
parser = optparse.OptionParser()
parser.add_option('-r', '--repo', dest='repo', action="store", type="string",
help='find reviews for this repository')
parser.add_option('-b', '--branch', dest='branch', action="store", type="string",
help='find reviews for this branch')
parser.add_option('-v', '--verbose', dest='verbose', action="store_true", default=False,
help='verbose/debug output')
parser.add_option('-u', '--url', dest='url', action="store", type="string",
help='reviewboard server url')
parser.add_option('-m', '--message', dest='message', action="store", type="string",
help='review submit message/description')
options, args = parser.parse_args()
VERBOSE = False
if options.verbose:
VERBOSE = True
if not options.url:
print("ERROR: You must specify a reviewboard server URL (-u|--url) to use")
sys.exit(2)
if not options.repo:
print("ERROR: You must specify a repo (-r|--repo) to find reviews for")
sys.exit(2)
if not options.branch:
print("ERROR: You must specify a branch (-b|--branch) to find reviews for")
sys.exit(2)
client = RBClient(options.url, username=RB_USER, password=RB_PASSWORD)
root = client.get_root()
if not root:
print("Error - could not get RBClient root.")
sys.exit(1)
repo = get_repository_id_by_name(root, options.repo, verbose=VERBOSE)
if repo is None:
print("ERROR: Could not find ReviewBoard repository with name '%s'" % options.repo)
sys.exit(3)
reviews = get_reviews_for_branch(root, repo, options.branch, verbose=VERBOSE)
if len(reviews) == 0:
print("ERROR: No open reviews found for branch %s in repo %s" % (options.branch, repo))
sys.exit(4)
if len(reviews) > 1:
print("ERROR: Multiple open reviews found for branch %s in repo %s" % (repo, options.branch))
sys.exit(5)
# ok, we have ONE review for the branch
review = reviews[0]
print("Found review %d" % review.id)
rb_data = {'status': 'submitted'}
if options.message:
rb_data['description'] = options.message
print("Submitting review %d" % review.id)
review.update(data=rb_data)
| mit | -8,580,810,473,810,728,000 | 32.1 | 101 | 0.634777 | false | 3.709838 | false | false | false |
rthille/silk | django_silky/silk/code_generation/django_test_client.py | 8 | 1696 | import urllib
import autopep8
import jinja2
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urlencode
from silk.profiling.dynamic import is_str_typ
from silk.profiling.profiler import silk_profile
template = """
from django.test import Client
c = Client()
response = c.{{ lower_case_method }}(path='{{ path }}'{% if data or content_type %},{% else %}){% endif %}{% if data %}
data={{ data }}{% endif %}{% if data and content_type %},{% elif data %}){% endif %}{% if content_type %}
content_type='{{ content_type }}'){% endif %}
"""
def _encode_query_params(query_params):
try:
query_params = urlencode(query_params)
except TypeError:
pass
query_params = '?' + query_params
return query_params
def gen(path,
method=None,
query_params=None,
data=None,
content_type=None):
"""generates python code representing a call via django client. useful for use in testing"""
method = method.lower()
t = jinja2.Template(template)
if method == 'get':
r = t.render(path=path,
data=query_params,
lower_case_method=method,
content_type=content_type)
else:
if query_params:
query_params = _encode_query_params(query_params)
path += query_params
if is_str_typ(data):
data = "'%s'" % data
r = t.render(path=path,
data=data,
lower_case_method=method,
query_params=query_params,
content_type=content_type)
    return autopep8.fix_code(r, options=autopep8.parse_args(['--aggressive', '']))
| mit | -438,907,746,481,306,800 | 31.634615 | 119 | 0.589623 | false | 3.889908 | false | false | false |
beckdaniel/GPy | GPy/inference/latent_function_inference/exact_gaussian_inference.py | 8 | 2785 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .posterior import Posterior
from ...util.linalg import pdinv, dpotrs, tdot
from ...util import diag
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class ExactGaussianInference(LatentFunctionInference):
"""
An object for inference when the likelihood is Gaussian.
The function self.inference returns a Posterior object, which summarizes
the posterior.
    For efficiency, we sometimes work with the Cholesky factor of Y*Y.T. To save repeatedly recomputing this, we cache it.
"""
def __init__(self):
pass#self._YYTfactor_cache = caching.cache()
def get_YYTfactor(self, Y):
"""
find a matrix L which satisfies LL^T = YY^T.
Note that L may have fewer columns than Y, else L=Y.
"""
N, D = Y.shape
if (N>D):
return Y
else:
#if Y in self.cache, return self.Cache[Y], else store Y in cache and return L.
#print "WARNING: N>D of Y, we need caching of L, such that L*L^T = Y, returning Y still!"
return Y
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None):
"""
Returns a Posterior class containing essential quantities of the posterior
"""
if mean_function is None:
m = 0
else:
m = mean_function.f(X)
YYT_factor = self.get_YYTfactor(Y-m)
K = kern.K(X)
Ky = K.copy()
diag.add(Ky, likelihood.gaussian_variance(Y_metadata)+1e-8)
Wi, LW, LWi, W_logdet = pdinv(Ky)
alpha, _ = dpotrs(LW, YYT_factor, lower=1)
log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))
dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)
return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
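
    # Reference for the closed forms above (descriptive note): with
    # Ky = K + sigma^2*I and alpha = Ky^{-1}(Y - m),
    #   log p(Y|X) = -0.5 * (N*D*log(2*pi) + D*log|Ky| + tr((Y-m)^T Ky^{-1} (Y-m)))
    #   dL/dK      =  0.5 * (alpha alpha^T - D * Ky^{-1})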
def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
"""
Leave one out error as found in
"Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
Vehtari et al. 2014.
"""
g = posterior.woodbury_vector
c = posterior.woodbury_inv
c_diag = np.diag(c)[:, None]
neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
#believe from Predictive Approaches for Choosing Hyperparameters in Gaussian Processes
#this is the negative marginal LOO
return -neg_log_marginal_LOO
| bsd-3-clause | -4,712,664,156,282,809,000 | 33.8125 | 141 | 0.619749 | false | 3.197474 | false | false | false |
johannwalder/content-reputation | server/app/server.py | 1 | 4154 | #!flask/bin/python
from flask import Flask, jsonify, abort, make_response
from flask.ext.restful import Api, Resource, reqparse, fields, marshal
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.sqlalchemy import SQLAlchemy
import sys
import os
sys.path.append(os.path.abspath('../models'))
sys.path.append(os.path.abspath('../database'))
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from base import Base
from rating import Rating
from content_type import ContentType
from content_rating import ContentRating
import dev_settings
engine = create_engine(URL(**dev_settings.DATABASE))
Session = sessionmaker(bind=engine)
session = Session()
app = Flask(__name__, static_url_path="")
api = Api(app)
auth = HTTPBasicAuth()
rating_fields = {
'id': fields.Integer,
'level': fields.String,
'terms': fields.String,
'uri': fields.Url('rating')
}
content_rating_fields = {
'id': fields.Integer,
'location': fields.String,
'rating': fields.String,
'contentType': fields.String,
'uri': fields.Url('contentrating')
}
class RatingListAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('level', type = str, required = True,
help = 'No rating level provided', location = 'json')
self.reqparse.add_argument('terms', type = str, required = True,
help = 'No rating terms provided', location = 'json')
super(RatingListAPI, self).__init__()
def get(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('limit', type = int, default=10,
location = 'args')
self.reqparse.add_argument('offset', type = int, default=0,
location = 'args')
args = self.reqparse.parse_args()
limit = args.get('limit')
offset = args.get('offset')
ratings = session.query(Rating).limit(limit).offset(offset).all()
return {'ratings': [marshal(rating, rating_fields) for rating in ratings]}
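
    # Example request (hedged): GET /api/v1.0/ratings?limit=5&offset=10 returns
    # {"ratings": [...]} with each entry marshalled through rating_fields.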
class RatingAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('level', type = str, required = True,
help = 'No rating level provided', location = 'json')
self.reqparse.add_argument('terms', type = str, required = True,
help = 'No rating terms provided', location = 'json')
super(RatingAPI, self).__init__()
def get(self, id):
rating = session.query(Rating).filter(Rating.id == id).all()
if len(rating) == 0:
abort(404)
return {'rating': marshal(rating[0], rating_fields)}
class ContentRatingListAPI(Resource):
def __init__(self):
super(ContentRatingListAPI, self).__init__()
def get(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('limit', type = int, default=10,
location = 'args')
self.reqparse.add_argument('offset', type = int, default=0,
location = 'args')
args = self.reqparse.parse_args()
limit = args.get('limit')
offset = args.get('offset')
contentratings = session.query(ContentRating).limit(limit).offset(offset).all()
return {'contentratings': [marshal(contentrating, content_rating_fields) for contentrating in contentratings]}
class ContentRatingAPI(Resource):
def __init__(self):
super(ContentRatingAPI, self).__init__()
def get(self, id):
contentrating = session.query(ContentRating).filter(ContentRating.id == id).all()
if len(contentrating) == 0:
abort(404)
return {'contentrating': marshal(contentrating[0], content_rating_fields)}
api.add_resource(RatingListAPI, '/api/v1.0/ratings', endpoint='ratings')
api.add_resource(RatingAPI, '/api/v1.0/ratings/<int:id>', endpoint = 'rating')
api.add_resource(ContentRatingListAPI, '/api/v1.0/contentratings', endpoint='contentratings')
api.add_resource(ContentRatingAPI, '/api/v1.0/contentratings/<int:id>', endpoint = 'contentrating')
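
# Minimal dev runner (hedged sketch; the dump ends without one). debug=True is
# for development only.
if __name__ == '__main__':
    app.run(debug=True)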
| apache-2.0 | 5,753,444,572,748,254,000 | 34.20339 | 118 | 0.656716 | false | 3.725561 | false | false | false |
skidekeersmaecker/raspi-cursus | labo3/tool.py | 1 | 1946 | #!/usr/bin/python
import time
import datetime
try:
f = open('ActivatedAlarmTimes.log', 'r')
lines = f.readlines()
f.close()
start = True
while(start == True):
method = raw_input("Enter what method you want to use to clean the log file. Delete line per line: 'line', or delete per date range: 'range'. \n")
if(method == 'line'):
start = False
newLines = []
print("Method line per line: \n\n")
for line in lines:
print("Next line from log: \n")
print(line)
option = raw_input("Delete (D) or keep (K) this line?")
if (option == 'D'):
print("\nDELETED LINE")
#lines.remove(line)
elif (option == 'K'):
newLines.append(line)
print("\nKEPT LINE")
else:
print("Invalid request.")
f = open('ActivatedAlarmTimes.log', 'w')
for line in newLines:
f.write(line)
            f.close()
elif(method == 'range'):
start = False
newLines = []
print("method range")
startTimeStamp = time.strptime(raw_input("Start-time to delete logs.(dd-mm-yy hh:mm:ss): "), "%d-%m-%y %H:%M:%S")
endTimeStamp = time.strptime(raw_input("End-time to delete logs.(dd-mm-yy hh:mm:ss): "), "%d-%m-%y %H:%M:%S")
for line in lines:
                # Parse the timestamp at the start of each log line; this assumes
                # every entry begins with the same "dd-mm-yy hh:mm:ss" stamp used above.
                lineTimeStamp = time.strptime(line.strip()[:17], "%d-%m-%y %H:%M:%S")
                if (lineTimeStamp < startTimeStamp) or (lineTimeStamp > endTimeStamp):
                    newLines.append(line)
f = open('ActivatedAlarmTimes.log', 'w')
for line in newLines:
f.write(line)
            f.close()
else:
print("Invalid request. \n")
except KeyboardInterrupt:
print("Exiting program")
| mit | -5,257,493,199,664,969,000 | 27.202899 | 172 | 0.584789 | false | 3.031153 | false | false | false |
suzannerohrback/somaticCNVpipeline | bin/segment/segmentfile.py | 1 | 1630 | #!/usr/bin/python
import sys
import os
import inspect
import subprocess as sub
import shlex
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import config as cfg
def writeMatlabScript(sample, species, tempDir, lowessDir, segmentDir):
segVars = cfg.Segment()
matlabName = ''.join(sample.split('_'))
matlabName = ''.join(matlabName.split('-'))
matlabName = ''.join(matlabName.split('.'))
scriptFile = tempDir + matlabName + '.m'
OUT = open(scriptFile, 'w')
OUT.write('%Sample specific variable definitions\n')
OUT.write(str("refFile = '" + segVars.binDict[species] + "';\n"))
OUT.write(str("binFile = '" + lowessDir + sample + ".lowess.txt';\n"))
OUT.write(str("saveFile = '" + segmentDir + sample + ".segments.txt';\n"))
# OUT.write(str("chromNum = " + str(segVars.chromNumDict[species]) + ";\n"))
OUT.write(str("alpha = " + str(segVars.CBSalpha) + ";\n"))
OUT.write('\n\n\n\n\n%Generic processing code\n')
IN = open(segVars.matlabBase, 'r')
for x in IN:
OUT.write(x)
OUT.close()
IN.close()
return matlabName
def segmentOne(sample, species, tempDir, lowessDir, segmentDir):
#write matlab script
scriptName = writeMatlabScript(sample, species, tempDir, lowessDir, segmentDir)
os.chdir(tempDir)
#run matlab script
stdoutFile = tempDir + sample + '.stderr.txt'
stdout = open(stdoutFile, 'w')
cmd = 'matlab -nodisplay -r ' + scriptName
cmd = shlex.split(cmd)
p = sub.Popen(cmd, stdout = stdout, stderr = sub.STDOUT)
p.wait()
stdout.close()
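
# Hedged usage sketch (paths and the sample/species values are illustrative):
#
#     segmentOne('sample01', 'human', '/tmp/seg/', '/data/lowess/', '/data/segments/')
#
# writes /tmp/seg/sample01.m, runs it through MATLAB, and leaves the CBS segment
# calls in /data/segments/sample01.segments.txt.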
| mit | 4,090,381,019,973,567,000 | 19.123457 | 86 | 0.674847 | false | 2.900356 | false | false | false |
speedyGonzales/highchart_examples | highchart_examples/highchart_examples/settings.py | 1 | 2499 | """
Django settings for highchart_examples project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7zwj0-r@m8w^5$q)5-q%tf^358+-or=0y*%3v0z3#91z@^psgu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'line_charts',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'highchart_examples.urls'
WSGI_APPLICATION = 'highchart_examples.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
#template location
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(BASE_DIR),"highchart_examples","static","templates"),
)
if DEBUG:
MEDIA_URL = '/media/'
STATIC_ROOT=os.path.join(os.path.dirname(BASE_DIR),"highchart_examples","static","static-only")
MEDIA_ROOT=os.path.join(os.path.dirname(BASE_DIR),"highchart_examples","static","media")
STATICFILES_DIRS=(
os.path.join(os.path.dirname(BASE_DIR),"highchart_examples","static","static"),
)
| gpl-2.0 | 4,479,322,939,957,160,000 | 25.03125 | 99 | 0.715486 | false | 3.212082 | false | false | false |
adamrvfisher/TechnicalAnalysisLibrary | MoveToDatabaseMod.py | 1 | 7161 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 11 19:26:12 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import pandas as pd
temp = pd.read_pickle('/Users/AmatVictoriaCuramIII/Desktop/PythonFiles/QQQAGGAdvice07_50')
ranger = range(0,len(temp['Adj Close']))
index = temp.index
#transfer to database modification
temp['LogRet'] = np.log(temp['Adj Close']/temp['Adj Close'].shift(1))
temp['LogRet'] = temp['LogRet'].fillna(0)
temp['52wkLow'] = temp['Adj Close'].rolling(252).min()
temp['52wkMax'] = temp['Adj Close'].rolling(252).max()
temp['Age'] = len(temp['Open'])
temp['TotalAverageAnnualReturn'] = temp['LogRet'].mean() * 252
temp['TotalAverageAnnualStandardDeviation'] = temp['LogRet'].std(
)*np.sqrt(252)
temp['CoefficientOfVariation'] = (
temp['TotalAverageAnnualStandardDeviation']/temp['TotalAverageAnnualReturn'])
temp['Rolling52wkMockReturn'] = temp['LogRet'].rolling(
center=False, window = 252).mean()
temp['Rolling52wkReturn'] = np.log(temp['Adj Close']/
temp['Adj Close'].shift(252))
temp['Rolling52wkStandardDeviation'] = temp['LogRet'].rolling(
center = False, window = 252).std()
temp['Rolling4wkStandardDeviation'] = temp['LogRet'].rolling(
center = False, window = 20).std()
temp['AverageAnnualRollingVolume'] = temp['Volume'].rolling(
center=False, window=252).mean()
temp['Rolling52wkCoefficientOfVariation'] = (
temp['Rolling52wkStandardDeviation']/temp['Rolling52wkReturn'])
temp['Rolling52wkDoubleStandardDeviation'] = (
    temp['Rolling52wkStandardDeviation'].rolling(
        center = False, window = 252).std())  # 252-day std of the rolling std
temp['4wkOver52wkStandardDeviationRatio'] = (
temp['Rolling4wkStandardDeviation']/temp['Rolling52wkStandardDeviation'])
# ADX with PDI, MDI, ADX, ADXmean, TrueRange, AverageTrueRange, ADXStrength
ADXwindow = 14
temp['ADXUpMove'] = temp['High'] - temp['High'].shift(1)
temp['ADXDownMove'] = temp['Low'] - temp['Low'].shift(1)
temp['Method1'] = temp['High'] - temp['Low']
temp['Method2'] = abs((temp['High'] - temp['Adj Close'].shift(1)))
temp['Method3'] = abs((temp['Low'] - temp['Adj Close'].shift(1)))
temp['Method1'] = temp['Method1'].fillna(0)
temp['Method2'] = temp['Method2'].fillna(0)
temp['Method3'] = temp['Method3'].fillna(0)
temp['TrueRange'] = temp[['Method1','Method2','Method3']].max(axis = 1)
temp['AverageTrueRange'] = temp['TrueRange'].rolling(window = ADXwindow,
center=False).sum()
temp['AverageTrueRange'] = ((temp['AverageTrueRange'].shift(1)*(ADXwindow-1
) + temp['TrueRange']) / ADXwindow)
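# Wilder smoothing: ATR_t = (ATR_{t-1}*(n-1) + TR_t) / n, seeded by the first
# n-day rolling sum above; the same recurrence smooths PDM and MDM below.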
temp['PDM'] = (temp['High'] - temp['High'].shift(1))
temp['MDM'] = (temp['Low'].shift(1) - temp['Low'])
temp['PDM'] = temp['PDM'][temp['PDM'] > 0]
temp['MDM'] = temp['MDM'][temp['MDM'] > 0]
temp['PDM'] = temp['PDM'].fillna(0)
temp['MDM'] = temp['MDM'].fillna(0)
temp['SmoothPDM'] = temp['PDM'].rolling(window = ADXwindow,
center=False).sum()
temp['SmoothPDM'] = ((temp['SmoothPDM'].shift(1)*(ADXwindow-1
) + temp['PDM']) / ADXwindow)
temp['SmoothMDM'] = temp['MDM'].rolling(window = ADXwindow,
center=False).sum()
temp['SmoothMDM'] = ((temp['SmoothMDM'].shift(1)*(ADXwindow-1
) + temp['MDM']) / ADXwindow)
temp['PDI'] = (100*(temp['SmoothPDM']/temp['AverageTrueRange']))
temp['MDI'] = (100*(temp['SmoothMDM']/temp['AverageTrueRange']))
temp['DIdiff'] = abs(temp['PDI'] - temp['MDI'])
temp['DIsum'] = temp['PDI'] + temp['MDI']
temp['DX'] = (100 * (temp['DIdiff']/temp['DIsum']))
temp['ADX'] = temp['DX'].rolling(window = ADXwindow, center = False).mean()
temp['DIdivergence'] = temp['PDI'] - temp['MDI']
temp['ADXMean'] = temp['ADX'].mean() * .9 #Scaling factor
temp['ADXStrength'] = temp['ADX']/temp['ADXMean']
#Ballerbands Lower/Upperband, bandwidth, b%
BBwindow = 20
temp['nDaySMA'] = temp['Adj Close'].rolling(window=BBwindow, center=False).mean()
temp['nDaySTD'] = temp['Adj Close'].rolling(window=BBwindow, center=False).std()
temp['UpperBand'] = temp['nDaySMA'] + (temp['nDaySTD'] * 2)
temp['LowerBand'] = temp['nDaySMA'] - (temp['nDaySTD'] * 2)
temp['BandWidth'] = ((temp['UpperBand'] - temp['LowerBand'])/temp['nDaySMA'])*100
temp['B'] = (temp['Adj Close'] - temp['LowerBand'])/(temp['UpperBand'] - temp['LowerBand'])
#Chaikin Money Flow MFMultiplier, CMF (CMF - need to normalize or make rolling sum)
CMFwindow = 20
temp['MFMultiplier'] = (((temp['Adj Close'] - temp['Low']) - (temp['High']
- temp['Adj Close'])) / (temp['High'] - temp['Low']))
temp['MFVolume'] = (temp['Volume'] * temp['MFMultiplier'])
temp['ZeroLine'] = 0
temp['CMF'] = temp['MFVolume'].rolling(center=False, window=CMFwindow).sum(
)/temp['Volume'].rolling(center=False, window=CMFwindow).sum()
#Commodity Channel Index
constant = .02
CCIwindow = 20
temp['TP'] = (temp['High'] + temp['Low'] + temp['Adj Close']) / 3
temp['TPSMA'] = temp['TP'].rolling(center=False, window = CCIwindow).mean()
temp['MeanDeviation'] = temp['TP'].rolling(center=False, window = CCIwindow).std()
temp['CCI'] = ((temp['TP'] - temp['TPSMA'])/(constant*temp['MeanDeviation']))
temp['Top'] = 100
temp['Bottom'] = -100
#Day over average rolling volume DayOverARV
DayOverwindow = 60
temp['AverageRollingVolume'] = temp['Volume'].rolling(center=False,
window=DayOverwindow).mean()
temp['DayOverARV'] = temp['Volume']/temp['AverageRollingVolume']
#Simple Moving Average
littlewindow = 20 #number of days for moving average window
bigwindow = 252 #numer of days for moving average window
temp['SmallSMA'] = temp['Adj Close'].rolling(window=littlewindow, center=False).mean()
temp['LargeSMA'] = temp['Adj Close'].rolling(window=bigwindow, center=False).mean()
temp['4wkOver52wk'] = (temp['SmallSMA'] - temp['LargeSMA'])/temp['Adj Close']
temp['priceOver4wk'] = (temp['Adj Close'] - temp['SmallSMA'])/temp['Adj Close']
#RSI
closeprice = temp['Adj Close']
RSIwindow = 14
change = closeprice.diff()
change = change[1:]
up, down = change.copy(), change.copy()
up[up < 0] = 0
down[down > 0] = 0
AvgGain = up.rolling(RSIwindow).mean()
AvgGain = AvgGain.fillna(0)
AvgLoss = down.abs().rolling(RSIwindow).mean()
AvgLoss = AvgLoss.fillna(0)
RS = AvgGain/AvgLoss
RS = RS.fillna(0)
RSI = 100 - (100/(1.0+RS))
temp['RSI'] = pd.Series(RSI)
#Gap Up
temp['GapUp'] = (temp['High'].shift(1) - temp['Low']) / temp['Adj Close'].shift(1)
temp['GapUp'] = temp['GapUp'][temp['GapUp'] < 0]
temp['GapUp'] = temp['GapUp'].fillna(0)
temp['GapUp'] = np.where(temp['GapUp'] == 0 , 0, (-1*temp['GapUp']))
#Gap Down
temp['GapDown'] = (temp['Low'].shift(1) - temp['High']) / temp['Adj Close'].shift(1)
temp['GapDown'] = temp['GapDown'][temp['GapDown'] > 0]
temp['GapDown'] = temp['GapDown'].fillna(0)
#Rate of Change
lag = 12
temp['RateOfChange'] = (temp['Adj Close'] - temp['Adj Close'].shift(lag)
) / temp['Adj Close'].shift(lag)
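# Hedged persistence sketch: nothing above writes the enriched frame back out;
# something like the following would, with the destination path illustrative:
#
#     temp.to_pickle('/Users/AmatVictoriaCuramIII/Desktop/PythonFiles/QQQAGGAdvice07_50_Enriched')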
| apache-2.0 | 6,850,816,351,955,829,000 | 41.630952 | 91 | 0.623796 | false | 2.961538 | false | false | false |
israeleriston/scientific-week | backend/venv/lib/python3.5/site-packages/flake8/plugins/_trie.py | 6 | 3003 | """Independent implementation of a Trie tree."""
__all__ = ('Trie', 'TrieNode')
def _iterate_stringlike_objects(string):
for i in range(len(string)):
yield string[i:i + 1]
class Trie(object):
"""The object that manages the trie nodes."""
def __init__(self):
"""Initialize an empty trie."""
self.root = TrieNode(None, None)
def add(self, path, node_data):
"""Add the node data to the path described."""
node = self.root
for prefix in _iterate_stringlike_objects(path):
child = node.find_prefix(prefix)
if child is None:
child = node.add_child(prefix, [])
node = child
node.data.append(node_data)
def find(self, path):
"""Find a node based on the path provided."""
node = self.root
for prefix in _iterate_stringlike_objects(path):
child = node.find_prefix(prefix)
if child is None:
return None
node = child
return node
def traverse(self):
"""Traverse this tree.
This performs a depth-first pre-order traversal of children in this
tree. It returns the results consistently by first sorting the
children based on their prefix and then traversing them in
alphabetical order.
"""
return self.root.traverse()
class TrieNode(object):
"""The majority of the implementation details of a Trie."""
def __init__(self, prefix, data, children=None):
"""Initialize a TrieNode with data and children."""
self.children = children or {}
self.data = data
self.prefix = prefix
def __repr__(self):
"""Generate an easy to read representation of the node."""
return 'TrieNode(prefix={0}, data={1})'.format(
self.prefix, self.data
)
def find_prefix(self, prefix):
"""Find the prefix in the children of this node.
:returns: A child matching the prefix or None.
:rtype: :class:`~TrieNode` or None
"""
return self.children.get(prefix, None)
def add_child(self, prefix, data, children=None):
"""Create and add a new child node.
:returns: The newly created node
:rtype: :class:`~TrieNode`
"""
new_node = TrieNode(prefix, data, children)
self.children[prefix] = new_node
return new_node
def traverse(self):
"""Traverse children of this node.
This performs a depth-first pre-order traversal of the remaining
children in this sub-tree. It returns the results consistently by
first sorting the children based on their prefix and then traversing
them in alphabetical order.
"""
if not self.children:
return
for prefix in sorted(self.children):
child = self.children[prefix]
yield child
for child in child.traverse():
yield child
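
# Hedged usage sketch:
#
#     trie = Trie()
#     trie.add('abc', 1)
#     node = trie.find('ab')                 # TrieNode with prefix 'b', or None
#     [n.prefix for n in trie.traverse()]    # depth-first, prefix-sorted children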
| mit | -6,228,070,058,706,080,000 | 29.958763 | 76 | 0.590077 | false | 4.333333 | false | false | false |
nhicher/ansible | test/units/module_utils/test_database.py | 75 | 4393 | import pytest
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# These are all valid strings
# The results are based on interpreting the identifier as a table name
VALID = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
INVALID = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
HOW_MANY_DOTS = (
('role', 'role', '"role"',
'PostgreSQL does not support role with more than 1 dots'),
('db', 'database', '"db"',
'PostgreSQL does not support database with more than 1 dots'),
('db.schema', 'schema', '"db"."schema"',
'PostgreSQL does not support schema with more than 2 dots'),
('db.schema.table', 'table', '"db"."schema"."table"',
'PostgreSQL does not support table with more than 3 dots'),
('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
'PostgreSQL does not support column with more than 4 dots'),
)
VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
def test_valid_quotes(identifier, quoted_identifier):
assert pg_quote_identifier(identifier, 'table') == quoted_identifier
@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
def test_invalid_quotes(identifier, id_type, msg):
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
assert pg_quote_identifier(identifier, id_type) == quoted_identifier
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier('%s.more' % identifier, id_type)
ex.match(msg)
| gpl-3.0 | 5,451,291,193,281,736,000 | 42.93 | 120 | 0.631004 | false | 3.73237 | true | false | false |
cmu-db/cmdbac | core/scripts/vagrant_deploy.py | 2 | 2528 | #!/usr/bin/env python
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmudbac.settings")
import django
django.setup()
from library.models import *
from deployers import *
from drivers import *
from analyzers import *
import utils
def main():
    if len(sys.argv) not in [3, 4]:
        print 'Usage: %s <repo_name> <deploy_id> [database_name]' % sys.argv[0]
        return
repo_name = sys.argv[1]
deploy_id = sys.argv[2]
if len(sys.argv) > 3:
database_name = sys.argv[3]
else:
database_name = 'MySQL'
print 'Database : {} ...'.format(database_name)
repo = Repository.objects.get(name=repo_name)
database = Database.objects.get(name=database_name)
moduleName = "deployers.%s" % (repo.project_type.deployer_class.lower())
moduleHandle = __import__(moduleName, globals(), locals(), [repo.project_type.deployer_class])
klass = getattr(moduleHandle, repo.project_type.deployer_class)
deployer = klass(repo, database, deploy_id)
if deployer.deploy() != 0:
deployer.kill_server()
sys.exit(-1)
print 'Driving ...'
driver = BaseDriver(deployer.get_main_url(), deployer.get_database(), deployer.deploy_id, deployer.base_path, deployer.log_file)
try:
driverResult = driver.drive()
except Exception, e:
LOG.exception(e)
driverResult = {}
print 'Random Walking ...'
try:
random_driver = RandomDriver(driver)
random_driver.start()
print 'Random Walk Forms Count: {}'.format(len(random_driver.forms))
print 'Basic Forms Count: {}'.format(len(driverResult['forms']))
for form in random_driver.forms:
if any(random_driver.equal_form(form, ret_form) for ret_form in driverResult['forms']):
continue
driverResult['forms'].append(form)
except Exception, e:
LOG.exception(e)
deployer.kill_server()
analyzer = get_analyzer(deployer)
for form in driverResult['forms']:
analyzer.analyze_queries(form['queries'])
for url in driverResult['urls']:
analyzer.analyze_queries(url['queries'])
driverResult['statistics'] = analyzer.queries_stats
analyzer.analyze_database()
driverResult['statistics'].update(analyzer.database_stats)
driverResult['informations'] = analyzer.database_informations
deployer.save_attempt(ATTEMPT_STATUS_SUCCESS, driverResult)
if __name__ == "__main__":
    main()
| apache-2.0 | -2,989,338,068,663,246,000 | 32.276316 | 132 | 0.658228 | false | 3.611429 | false | false | false |
rPawel/Tomboy2Evernote | Tomboy2Evernote.py | 1 | 6982 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import re
import sys, getopt
import glob
import os
def process_files(inputdir, outputdir):
os.chdir(inputdir)
enex_notes = []
output_filename = 'Tomboy2Evernote.enex'
i = 0
for file in glob.glob("*.note"):
note_file_path = inputdir + '/' + file
note_body = open(note_file_path, 'r').read()
tag = get_tag(note_body)
print(tag)
title = get_title(note_body)
html_note_body = get_html_body(note_body)
created_date = tomboy_to_enex_date(get_created_date(note_body))
updated_date = tomboy_to_enex_date(get_updated_date(note_body))
enex_notes.append(make_enex(title, html_note_body, created_date, updated_date, tag))
i += 1
multi_enex_body = make_multi_enex(enex_notes)
save_to_file(outputdir, output_filename, multi_enex_body)
print("Exported notes count: ",i)
print("Evernote file location: " + outputdir + "/" + output_filename)
def get_title(note_body):
title_regex = re.compile("<title>(.+?)</title>")
matches = title_regex.search(note_body);
if matches:
return matches.group(1)
else:
return "No Title"
def get_created_date(note_body):
created_date_regex = re.compile("<create-date>(.+?)</create-date>")
matches = created_date_regex.search(note_body);
if matches:
return matches.group(1)
else:
return "No Created Date"
def get_updated_date(note_body):
updated_date_regex = re.compile("<last-change-date>(.+?)</last-change-date>")
matches = updated_date_regex.search(note_body);
if matches:
return matches.group(1)
else:
return "No Updated Date"
def tomboy_to_enex_date(tomboy_date):
return re.sub(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2}).*", r"\1\2\3T\4\5\6Z",
tomboy_date)
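# e.g. '2015-04-12T15:34:31.123456-07:00' -> '20150412T153431Z'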
def get_tag(note_body):
created_date_regex = re.compile("<tag>(.+?)</tag>")
matches = created_date_regex.search(note_body);
if matches:
parts = matches.group(1).split(':')
if(len(parts) == 3):
return parts[2]
else:
return "NA"
else:
return "NA"
def get_html_body(note_body):
new_line = '<BR/>'
xml_tag = r"<(\/?)[a-zA-Z0-9_\-:]+>"
start_xml_tag = r"<[a-zA-Z0-9_\-:]+>"
# make note body a one liner
note_body = note_body.replace('\n', new_line)
# get content
note_body = re.sub(r".*<note-content.+?>(.+?)</note-content>.*", r"\1", note_body)
# strip title until new_line or start_xml_tag
note_body = re.sub(r"^(.+?)(" + start_xml_tag + "|" + new_line + ")", r"\2", note_body)
# strip first two new lines, even if prefixed with an xml tag
tag = re.match("^" + start_xml_tag, note_body)
if tag != None:
note_body = re.sub(r"^" + start_xml_tag, r"", note_body)
note_body = re.sub(r"^(" + new_line + "){1,2}", r"", note_body)
if tag != None:
note_body = tag.group(0) + note_body
# links
note_body = re.sub(r"<link:internal>(.+?)</link:internal>", r"\1", note_body)
note_body = re.sub(r"<link:broken>(.+?)</link:broken>", r"\1", note_body)
p = re.compile(r"(<link:url>(.+?)</link:url>)")
for m in p.finditer(note_body):
if re.search(r"^([a-zA-Z0-9\._%+\-]+@(?:[a-zA-Z0-9\-]+\.)+[a-zA-Z]{2,10}|https?://.+)$", m.group(2)):
note_body = note_body.replace(m.group(1), '<a href="' + m.group(2) + '">' + m.group(2) + "</a>")
else:
note_body = note_body.replace(m.group(1), m.group(2))
# lists
note_body = re.sub(r"<(\/?)list>", r"<\1ul>", note_body)
note_body = re.sub(r'<list-item dir="ltr">', r"<li>", note_body)
note_body = re.sub(r"<(\/?)list-item>", r"<\1li>", note_body)
# higlight
note_body = re.sub(r"<highlight>(.+?)</highlight>", r'<span style="background:yellow">\1</span>', note_body)
# font size
note_body = re.sub(r"<size:small>(.+?)</size:small>", r'<span style="font-size:small">\1</span>', note_body)
note_body = re.sub(r"<size:large>(.+?)</size:large>", r'<span style="font-size:large">\1</span>', note_body)
note_body = re.sub(r"<size:huge>(.+?)</size:huge>", r'<span style="font-size:xx-large">\1</span>', note_body)
# text style
note_body = re.sub(r"<(\/?)monospace>", r"<\1code>", note_body)
note_body = re.sub(r"<(\/?)bold>", r"<\1b>", note_body)
note_body = re.sub(r"<(\/?)italic>", r"<\1i>", note_body)
note_body = re.sub(r"<(\/?)strikethrough>", r"<\1strike>", note_body)
# identation
note_body = re.sub(r"\t", r" ", note_body)
while re.search(new_line + " ", note_body) != None:
note_body = re.sub("(" + new_line + " *) ", r"\1 ", note_body)
# set new lines
note_body = note_body.replace(new_line, '<br/>\n')
return note_body
def make_enex(title, body, created_date, updated_date, tag):
return '''<note><title>''' + title + '''</title><content><![CDATA[<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note style="word-wrap: break-word; -webkit-nbsp-mode: space; -webkit-line-break: after-white-space;">
''' + body + '''
</en-note>]]></content><created>''' + created_date + '''</created><updated>''' + updated_date + '''</updated><tag>''' + tag + '''</tag></note>'''
def make_multi_enex(multi_enex_body):
return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export2.dtd">
<en-export export-date="20150412T153431Z" application="Evernote/Windows" version="5.x">
''' + ''.join(multi_enex_body) + '''</en-export>'''
def save_to_file(outputdir, filename, body):
if not os.path.exists(outputdir):
os.makedirs(outputdir)
text_file = open(outputdir + '/' + filename, "w")
text_file.write(body)
text_file.close()
def get_help_line():
print('Usage: ', sys.argv[0], ' -i <inputdir> -o <outputdir>')
def get_input_params(argv):
inputdir = ''
outputdir = ''
printhelpline = 0
try:
opts, args = getopt.getopt(argv, "hi:o:", ["idir=", "odir="])
except getopt.GetoptError:
exit_with_error()
for opt, arg in opts:
print(opt + ':' + arg)
if opt == '-h':
get_help_line()
sys.exit()
elif opt in ("-i", "--idir"):
inputdir = arg
elif opt in ("-o", "--odir"):
outputdir = arg
if (inputdir == ""):
print("Error: Missing input folder")
printhelpline = 1
if (outputdir == ""):
print("Error: Missing output folder")
printhelpline = 1
if printhelpline == 1:
exit_with_error()
return (inputdir, outputdir)
def exit_with_error():
get_help_line()
sys.exit(2)
def main(argv):
inputdir, outputdir = get_input_params(argv)
process_files(inputdir, outputdir)
if __name__ == "__main__":
main(sys.argv[1:])
| mit | 6,535,413,119,255,935,000 | 33.058537 | 145 | 0.569608 | false | 2.926236 | false | false | false |
sajithshetty/SLAE32 | additional/ARC4_Crypter/ARC4.py | 1 | 1129 | #!/usr/bin/python
from Crypto.Cipher import ARC4
from ctypes import *
# shellcode to give /bin/sh using execve sys call
shell_code = ("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80")
#i want to display the encrypted and decrypted shellcode in hex format so created a function
def out_shellcodeformat(shell_code):
encoded = ""
for x in bytearray(shell_code):
encoded += '\\x'
encoded += '%02x' % x
print (encoded)
#Key used to encrypt shellcode
e = ARC4.new('1234567890')
# Key used to decrypt shellcode
d = ARC4.new('1234567890')
enc_text = e.encrypt(shell_code)
print "[+]Encrypted text:", out_shellcodeformat(enc_text)
decrypted_text = d.decrypt(enc_text)
print "[+]Decrypted text:", out_shellcodeformat(decrypted_text)
libc = CDLL('libc.so.6')
sc = c_char_p(decrypted_text)
size = len(decrypted_text)
print "[+]Length of decrypted shellcode:", size
addr = c_void_p(libc.valloc(size))
memmove(addr, sc, size)
libc.mprotect(addr, size, 0x7)
run = cast(addr, CFUNCTYPE(c_void_p))
run()
| cc0-1.0 | -425,334,183,363,040,700 | 26.536585 | 117 | 0.671391 | false | 2.700957 | false | false | false |
Alwnikrotikz/kegbot | pykeg/src/pykeg/web/middleware.py | 1 | 2140 | # Copyright 2011 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
from pykeg.core import models
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
class KegbotSiteMiddleware:
def process_request(self, request):
if not hasattr(request, 'kbsite'):
sitename = 'default'
try:
request.kbsite = models.KegbotSite.objects.get(name=sitename)
except models.KegbotSite.DoesNotExist:
request.kbsite = None
class SiteActiveMiddleware:
"""Middleware which throws 503s when KegbotSite.is_active is false."""
ALLOWED_PATHS = (
'/accounts/login/',
'/admin/',
'/site_media/',
)
def _path_allowed(self, path):
for p in self.ALLOWED_PATHS:
if path.startswith(p):
return True
return False
def process_request(self, request):
kbsite = None
if hasattr(request, 'kbsite'):
kbsite = request.kbsite
# We have a KegbotSite, and that site is active: nothing to do.
if kbsite and kbsite.is_active:
return None
# If the request is for a whitelisted path, allow it.
if self._path_allowed(request.path):
return None
# Allow staff/superusers access if inactive.
if request.user.is_staff or request.user.is_superuser:
return None
else:
return HttpResponse('Site temporarily unavailable', status=503)
| gpl-2.0 | 2,266,068,532,907,021,300 | 32.4375 | 72 | 0.713551 | false | 3.821429 | false | false | false |
roytang121/Facebook-AutoPoke | autopoke.py | 1 | 1957 | #!/usr/bin/
import urllib2, urllib
from bs4 import BeautifulSoup
import getpass
from cookielib import CookieJar
import time
import re
def getlsd(soup):
return soup.find('input', {'name':'lsd'})['value']
def get_lgnrnd(soup):
return soup.find('input', {'name':'lgnrnd'})['value']
def get_lgnjs(soup):
return soup.find('input', {'id':'lgnjs'})['value']
def prepare_value():
values = {
'lsd': lsd,
'lgnrnd':lgnrnd,
'lgnjs':lgnjs,
'email': email,
'pass': pwd,
'persistent':1,
'default_persistent':1,
'locale': 'en_US',
}
return values
if __name__ == "__main__":
email = raw_input('FB Email: ')
pwd = getpass.getpass()
login_url = "https://www.facebook.com/login.php?login_attempt=1"
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open(login_url)
soup = BeautifulSoup(response.read())
lsd = getlsd(soup)
lgnrnd = get_lgnrnd(soup)
lgnjs = get_lgnjs(soup)
values = prepare_value()
#print values #debugging
print "\n\nStart login\n\n"
params = urllib.urlencode(values)
response = opener.open(login_url, params)
soup = BeautifulSoup(response.read())
div = soup.find('div', {'id':'pagelet_welcome_box'})
if div != None:
print "Login Success"
#pollloop
while True:
print "start auto poking"
poke_url = "https://www.facebook.com/pokes/"
response = opener.open(poke_url)
soup = BeautifulSoup(response.read())
mls_all = soup.find_all('div', {'class':'mls'})
for mls in mls_all:
a = mls.find('a', {'class':'_42ft _4jy0 _4jy3 _4jy1 selected'})
poke_action = a['ajaxify']
regex = re.compile('suggestion_type')
matcher = regex.search(poke_action)
if matcher:
print "suggestion, pass"
else:
base = "https://www.facebook.com"
print "poking to action: " + base + poke_action
opener.open(base + poke_action)
print 'Done'
print "All done, sleep"
time.sleep(5)
else:
print "Login failed"
| apache-2.0 | 369,400,945,964,829,060 | 22.865854 | 67 | 0.652529 | false | 2.783784 | false | false | false |
bwhite/hadoopy | tests/typedbytes_experiments/speedtest.py | 1 | 1417 | import glob
import subprocess
import time
import typedbytes
import cStringIO as StringIO
import itertools
def parse_tb(val):
fp = StringIO.StringIO(val)
for x in typedbytes.PairedInput(fp):
yield x
def time_script(script_name, data):
st = time.time()
p = subprocess.Popen(('python %s' % script_name).split(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
o = p.communicate(data)[0]
return o, time.time() - st
def main():
out = []
print('+-----------------+---------+---------+---------+---------+---------+')
print('|Filename |Hadoopy |HadoopyFP|TB |cTB |Ratio |')
print('+=================+=========+=========+=========+=========+=========+')
for fn in sorted(glob.glob('*.tb')):
with open(fn) as fp:
data = fp.read()
o0, t0 = time_script('speed_hadoopy.py', data)
o1, t1 = time_script('speed_hadoopyfp.py', data)
o2, t2 = time_script('speed_tb.py', data)
o3, t3 = time_script('speed_tbc.py', data)
out.append((fn, t0, t1, t2, t3, min([t2, t3]) / t1))
assert(o0 == o1 == o2 == o3)
out.sort(lambda x, y: cmp(x[-1], y[-1]), reverse=True)
for x in out:
print('|%17s|%9.6f|%9.6f|%9.6f|%9.6f|%9.6f|' % x)
print('+-----------------+---------+---------+---------+---------+---------+')
if __name__ == '__main__':
main()
| gpl-3.0 | -2,890,003,913,588,215,300 | 32.738095 | 86 | 0.468596 | false | 3.25 | false | false | false |
awdeorio/mailmerge | mailmerge/__main__.py | 1 | 12757 | """
Command line interface implementation.
Andrew DeOrio <awdeorio@umich.edu>
"""
from __future__ import print_function
import sys
import time
import codecs
import textwrap
import click
from .template_message import TemplateMessage
from .sendmail_client import SendmailClient
from . import exceptions
from . import utils
# Python 2 pathlib support requires backport
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
# Python 2 UTF8 support requires csv backport
try:
from backports import csv
except ImportError:
import csv
# Python 2 UTF8 file redirection
# http://www.macfreek.nl/memory/Encoding_of_Python_stdout
if sys.stdout.encoding != 'UTF-8' and not hasattr(sys.stdout, "buffer"):
sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'strict')
@click.command(context_settings={"help_option_names": ['-h', '--help']})
@click.version_option() # Auto detect version from setup.py
@click.option(
"--sample", is_flag=True, default=False,
help="Create sample template, database, and config",
)
@click.option(
"--dry-run/--no-dry-run", default=True,
help="Don't send email, just print (dry-run)",
)
@click.option(
"--no-limit", is_flag=True, default=False,
help="Do not limit the number of messages",
)
@click.option(
"--limit", is_flag=False, default=1,
type=click.IntRange(0, None),
help="Limit the number of messages (1)",
)
@click.option(
"--resume", is_flag=False, default=1,
type=click.IntRange(1, None),
help="Start on message number INTEGER",
)
@click.option(
"--template", "template_path",
default="mailmerge_template.txt",
type=click.Path(),
help="template email (mailmerge_template.txt)"
)
@click.option(
"--database", "database_path",
default="mailmerge_database.csv",
type=click.Path(),
help="database CSV (mailmerge_database.csv)",
)
@click.option(
"--config", "config_path",
default="mailmerge_server.conf",
type=click.Path(),
help="server configuration (mailmerge_server.conf)",
)
@click.option(
"--output-format", "output_format",
default="colorized",
type=click.Choice(["colorized", "text", "raw"]),
help="Output format (colorized).",
)
def main(sample, dry_run, limit, no_limit, resume,
template_path, database_path, config_path,
output_format):
"""
Mailmerge is a simple, command line mail merge tool.
For examples and formatting features, see:
https://github.com/awdeorio/mailmerge
"""
# We need an argument for each command line option. That also means a lot
# of local variables.
# pylint: disable=too-many-arguments, too-many-locals
# Convert paths from string to Path objects
# https://github.com/pallets/click/issues/405
template_path = Path(template_path)
database_path = Path(database_path)
config_path = Path(config_path)
# Make sure input files exist and provide helpful prompts
check_input_files(template_path, database_path, config_path, sample)
# Calculate start and stop indexes. Start and stop are zero-based. The
# input --resume is one-based.
start = resume - 1
stop = None if no_limit else resume - 1 + limit
# Run
message_num = 1 + start
try:
template_message = TemplateMessage(template_path)
csv_database = read_csv_database(database_path)
sendmail_client = SendmailClient(config_path, dry_run)
for _, row in enumerate_range(csv_database, start, stop):
sender, recipients, message = template_message.render(row)
while True:
try:
sendmail_client.sendmail(sender, recipients, message)
except exceptions.MailmergeRateLimitError:
print_bright_white_on_cyan(
">>> rate limit exceeded, waiting ...",
output_format,
)
else:
break
time.sleep(1)
print_bright_white_on_cyan(
">>> message {message_num}"
.format(message_num=message_num),
output_format,
)
print_message(message, output_format)
print_bright_white_on_cyan(
">>> message {message_num} sent"
.format(message_num=message_num),
output_format,
)
message_num += 1
except exceptions.MailmergeError as error:
hint_text = '\nHint: "--resume {}"'.format(message_num)
sys.exit(
"Error on message {message_num}\n"
"{error}"
"{hint}"
.format(
message_num=message_num,
error=error,
hint=(hint_text if message_num > 1 else ""),
)
)
# Hints for user
if not no_limit:
print(
">>> Limit was {limit} message{pluralizer}. "
"To remove the limit, use the --no-limit option."
.format(limit=limit, pluralizer=("" if limit == 1 else "s"))
)
if dry_run:
print(
">>> This was a dry run. "
"To send messages, use the --no-dry-run option."
)
if __name__ == "__main__":
# No value for parameter, that's how click works
# pylint: disable=no-value-for-parameter
main()
def check_input_files(template_path, database_path, config_path, sample):
"""Check if input files are present and hint the user."""
if sample:
create_sample_input_files(template_path, database_path, config_path)
sys.exit(0)
if not template_path.exists():
sys.exit(textwrap.dedent(u"""\
Error: can't find template "{template_path}".
Create a sample (--sample) or specify a file (--template).
See https://github.com/awdeorio/mailmerge for examples.\
""".format(template_path=template_path)))
if not database_path.exists():
sys.exit(textwrap.dedent(u"""\
Error: can't find database "{database_path}".
Create a sample (--sample) or specify a file (--database).
See https://github.com/awdeorio/mailmerge for examples.\
""".format(database_path=database_path)))
if not config_path.exists():
sys.exit(textwrap.dedent(u"""\
Error: can't find config "{config_path}".
Create a sample (--sample) or specify a file (--config).
See https://github.com/awdeorio/mailmerge for examples.\
""".format(config_path=config_path)))
def create_sample_input_files(template_path, database_path, config_path):
"""Create sample template, database and server config."""
for path in [template_path, database_path, config_path]:
if path.exists():
sys.exit("Error: file exists: {}".format(path))
with template_path.open("w") as template_file:
template_file.write(textwrap.dedent(u"""\
TO: {{email}}
SUBJECT: Testing mailmerge
FROM: My Self <myself@mydomain.com>
Hi, {{name}},
Your number is {{number}}.
"""))
with database_path.open("w") as database_file:
database_file.write(textwrap.dedent(u"""\
email,name,number
myself@mydomain.com,"Myself",17
bob@bobdomain.com,"Bob",42
"""))
with config_path.open("w") as config_file:
config_file.write(textwrap.dedent(u"""\
# Mailmerge SMTP Server Config
# https://github.com/awdeorio/mailmerge
#
# Pro-tip: SSH or VPN into your network first to avoid spam
# filters and server throttling.
#
# Parameters
# host # SMTP server hostname or IP
# port # SMTP server port
# security # Security protocol: "SSL/TLS", "STARTTLS", or omit
# username # Username for SSL/TLS or STARTTLS security
# ratelimit # Rate limit in messages per minute, 0 for unlimited
# Example: GMail
[smtp_server]
host = smtp.gmail.com
port = 465
security = SSL/TLS
username = YOUR_USERNAME_HERE
ratelimit = 0
# Example: SSL/TLS
# [smtp_server]
# host = smtp.mail.umich.edu
# port = 465
# security = SSL/TLS
# username = YOUR_USERNAME_HERE
# ratelimit = 0
# Example: STARTTLS security
# [smtp_server]
# host = newman.eecs.umich.edu
# port = 25
# security = STARTTLS
# username = YOUR_USERNAME_HERE
# ratelimit = 0
# Example: No security
# [smtp_server]
# host = newman.eecs.umich.edu
# port = 25
# ratelimit = 0
"""))
print(textwrap.dedent(u"""\
Created sample template email "{template_path}"
Created sample database "{database_path}"
Created sample config file "{config_path}"
Edit these files, then run mailmerge again.\
""".format(
template_path=template_path,
database_path=database_path,
config_path=config_path,
)))
def read_csv_database(database_path):
"""Read database CSV file, providing one line at a time.
We'll use a class to modify the csv library's default dialect ('excel') to
enable strict syntax checking. This will trigger errors for things like
unclosed quotes.
"""
class StrictExcel(csv.excel):
# Our helper class is really simple
# pylint: disable=too-few-public-methods, missing-class-docstring
strict = True
with database_path.open(mode="r", encoding="utf-8") as database_file:
reader = csv.DictReader(database_file, dialect=StrictExcel)
try:
for row in reader:
yield row
except csv.Error as err:
raise exceptions.MailmergeError(
"{}:{}: {}".format(database_path, reader.line_num, err)
)
def enumerate_range(iterable, start=0, stop=None):
"""Enumerate iterable, starting at index "start", stopping before "stop".
To enumerate the entire iterable, start=0 and stop=None.
"""
assert start >= 0
assert stop is None or stop >= 0
for i, value in enumerate(iterable):
if i < start:
continue
if stop is not None and i >= stop:
return
yield i, value
def print_cyan(string, output_format):
"""Print string to stdout, optionally enabling color."""
if output_format == "colorized":
string = "\x1b[36m" + string + "\x1b(B\x1b[m"
print(string)
def print_bright_white_on_cyan(string, output_format):
"""Print string to stdout, optionally enabling color."""
if output_format == "colorized":
string = "\x1b[7m\x1b[1m\x1b[36m" + string + "\x1b(B\x1b[m"
print(string)
def print_message(message, output_format):
"""Print a message with colorized output."""
assert output_format in ["colorized", "text", "raw"]
if output_format == "raw":
print(utils.flatten_message(message))
return
for header, value in message.items():
print(u"{header}: {value}".format(header=header, value=value))
print()
for part in message.walk():
if part.get_content_maintype() == "multipart":
pass
elif part.get_content_maintype() == "text":
if message.is_multipart():
# Only print message part dividers for multipart messages
print_cyan(
">>> message part: {content_type}"
.format(content_type=part.get_content_type()),
output_format,
)
charset = str(part.get_charset())
print(part.get_payload(decode=True).decode(charset))
print()
elif is_attachment(part):
print_cyan(
">>> message part: attachment {filename}"
.format(filename=part.get_filename()),
output_format,
)
else:
print_cyan(
">>> message part: {content_type}"
.format(content_type=part.get_content_type()),
output_format,
)
def is_attachment(part):
"""Return True if message part looks like an attachment."""
return (
part.get_content_maintype() != "multipart" and
part.get_content_maintype() != "text" and
part.get("Content-Disposition") != "inline" and
part.get("Content-Disposition") is not None
)
| mit | -3,540,119,977,301,396,500 | 31.963824 | 79 | 0.583601 | false | 4.006595 | true | false | false |
ndawe/pyAMI | pyAMI/objects.py | 1 | 2622 | from string import rjust
from cStringIO import StringIO
class DatasetInfo(object):
def __init__(self, dataset):
self.dataset = dataset
self.info = {}
self.extra = {}
self.comments = {}
self.properties = {}
def __repr__(self):
return self.__str__()
def __str__(self):
out = StringIO()
if self.info:
print >> out, ""
print >> out, "Dataset Parameters"
print >> out, "========================"
for name, value in self.info.items():
spaces = rjust(" ", 30 - len(name))
print >> out, name + spaces + value
if self.comments:
print >> out, ''
print >> out, "Comments"
print >> out, "========================"
for name, value in self.comments.items():
spaces = rjust(" ", 30 - len(name))
print >> out, name + spaces + value
if self.extra:
print >> out, ''
print >> out, "Extra Parameters"
print >> out, "========================"
for name, value in self.extra.items():
spaces = rjust(" ", 30 - len(name))
print >> out, name + spaces + value
if self.properties:
print >> out, ''
print >> out, "Other Physics Properties"
print >> out, "========================"
for physProp, tmpDict in self.properties.items():
valueToPrint = ''
if tmpDict.get("min") == tmpDict.get("max"):
valueToPrint = tmpDict.get("min")
else:
valueToPrint = ' - '.join([tmpDict.get("min"), tmpDict.get("max")])
print >> out, physProp + rjust(' ', 30 - len(physProp)) + \
' '.join([valueToPrint, tmpDict.get("unit"), '(%s)' % tmpDict.get("description")])
return out.getvalue()
class RunPeriod(object):
def __init__(self, project, name, level,
status,
description,
year=0):
self.project = project
self.year = year
self.name = name
self.level = level
self.status = status
self.description = description
def __cmp__(self, other):
if self.year > other.year:
return 1
elif self.year < other.year:
return -1
return cmp(self.name, other.name)
def __repr__(self):
return self.__str__()
def __str__(self):
return '%s %s' % (self.project, self.name)
| gpl-3.0 | 5,222,235,472,499,969,000 | 29.488372 | 106 | 0.455378 | false | 4.414141 | false | false | false |
heuermh/ga4gh-schemas | scripts/process_schemas.py | 1 | 10053 | """
A script to generate the schemas for the GA4GH protocol. These are generated
from a copy of the Protocol Buffers schema and use it to generate
the Python class definitions. These are also stored in revision
control to aid Travis building.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path
import subprocess
import fnmatch
import re
import argparse
import shlex
# IMPORTANT!
# Do not import any ga4gh or otherwise non-standard packages in this file.
# process_schemas is included in ga4gh-schema's install path in setup.py.
# Importing, for instance, ga4gh-common here will break an install if
# the environment does not have that package installed previously.
# We really want to avoid this scenario!
# (This does result in some code duplication in this file.)
# Below code duplicated from ga4gh-common
def runCommandSplits(splits, silent=False, shell=False):
"""
Run a shell command given the command's parsed command line
"""
try:
if silent:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(
splits, stdout=devnull, stderr=devnull, shell=shell)
else:
subprocess.check_call(splits, shell=shell)
except OSError, e:
if e.errno == 2: # cmd not found
raise Exception(
"Can't find command while trying to run {}".format(splits))
else:
raise
def runCommand(command, silent=False, shell=False):
"""
Run a shell command
"""
splits = shlex.split(command)
runCommandSplits(splits, silent=silent, shell=shell)
# Above code duplicated from ga4gh-common
class ProtobufGenerator(object):
def __init__(self, version):
self.version = version
def _createSchemaFiles(self, destPath, schemasPath):
"""
Create a hierarchy of proto files in a destination directory, copied
from the schemasPath hierarchy
"""
# Create the target directory hierarchy, if neccessary
ga4ghPath = os.path.join(destPath, 'ga4gh')
if not os.path.exists(ga4ghPath):
os.mkdir(ga4ghPath)
ga4ghSchemasPath = os.path.join(ga4ghPath, 'schemas')
if not os.path.exists(ga4ghSchemasPath):
os.mkdir(ga4ghSchemasPath)
ga4ghSchemasGa4ghPath = os.path.join(ga4ghSchemasPath, 'ga4gh')
if not os.path.exists(ga4ghSchemasGa4ghPath):
os.mkdir(ga4ghSchemasGa4ghPath)
ga4ghSchemasGooglePath = os.path.join(ga4ghSchemasPath, 'google')
if not os.path.exists(ga4ghSchemasGooglePath):
os.mkdir(ga4ghSchemasGooglePath)
ga4ghSchemasGoogleApiPath = os.path.join(
ga4ghSchemasGooglePath, 'api')
if not os.path.exists(ga4ghSchemasGoogleApiPath):
os.mkdir(ga4ghSchemasGoogleApiPath)
# rewrite the proto files to the destination
for root, dirs, files in os.walk(schemasPath):
for protoFilePath in fnmatch.filter(files, '*.proto'):
src = os.path.join(root, protoFilePath)
dst = os.path.join(
ga4ghSchemasPath,
os.path.relpath(root, schemasPath), protoFilePath)
self._copySchemaFile(src, dst)
def _doLineReplacements(self, line):
"""
Given a line of a proto file, replace the line with one that is
appropriate for the hierarchy that we want to compile
"""
# ga4gh packages
packageString = 'package ga4gh;'
if packageString in line:
return line.replace(
packageString,
'package ga4gh.schemas.ga4gh;')
importString = 'import "ga4gh/'
if importString in line:
return line.replace(
importString,
'import "ga4gh/schemas/ga4gh/')
# google packages
googlePackageString = 'package google.api;'
if googlePackageString in line:
return line.replace(
googlePackageString,
'package ga4gh.schemas.google.api;')
googleImportString = 'import "google/api/'
if googleImportString in line:
return line.replace(
googleImportString,
'import "ga4gh/schemas/google/api/')
optionString = 'option (google.api.http)'
if optionString in line:
return line.replace(
optionString,
'option (.ga4gh.schemas.google.api.http)')
return line
def _copySchemaFile(self, src, dst):
"""
Copy a proto file to the temporary directory, with appropriate
line replacements
"""
with open(src) as srcFile, open(dst, 'w') as dstFile:
srcLines = srcFile.readlines()
for srcLine in srcLines:
toWrite = self._doLineReplacements(srcLine)
dstFile.write(toWrite)
def _find_in_path(self, cmd):
PATH = os.environ.get("PATH", os.defpath).split(os.pathsep)
for x in PATH:
possible = os.path.join(x, cmd)
if os.path.exists(possible):
return possible
return None
def _assertSchemasExist(self, schemas_path):
if not os.path.exists(schemas_path):
raise Exception(
"Can't find schemas folder. " +
"Thought it would be at {}".format(
os.path.realpath(schemas_path)))
def _assertProtoDirectoryExists(self, source_path):
if not os.path.exists(source_path):
msg = "Can't find source proto directory {}".format(
os.path.realpath(source_path))
raise Exception(msg)
# From http://stackoverflow.com/a/1714190/320546
def _version_compare(self, version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
return cmp(normalize(version1), normalize(version2))
def _getProtoc(self, destination_path):
protocs = [
os.path.realpath(x) for x in
"{}/protobuf/src/protoc".format(destination_path),
self._find_in_path("protoc")
if x is not None]
protoc = None
for c in protocs:
if not os.path.exists(c):
continue
output = subprocess.check_output([c, "--version"]).strip()
try:
(lib, version) = output.split(" ")
if lib != "libprotoc":
raise Exception("lib didn't match 'libprotoc'")
if self._version_compare("3.0.0", version) > 0:
raise Exception("version < 3.0.0")
protoc = c
break
except Exception:
print(
"Not using {path} because it returned " +
"'{version}' rather than \"libprotoc <version>\", where " +
"<version> >= 3.0.0").format(path=c, format=output)
if protoc is None:
raise Exception("Can't find a good protoc. Tried {}".format(
protocs))
print("Using protoc: '{}'".format(protoc))
return protoc
def _writePythonFiles(self, source_path, protoc, destination_path):
protos = []
for root, dirs, files in os.walk(source_path):
protos.extend([
os.path.join(root, f)
for f in fnmatch.filter(files, "*.proto")])
if len(protos) == 0:
raise Exception(
"Didn't find any proto files in '{}'".format(source_path))
print("pb2 files destination: '{}'".format(destination_path))
cmdString = (
"{protoc} -I {source_path} -I ./src/main "
"--python_out={destination_path} {proto_files}")
cmd = cmdString.format(
protoc=protoc, source_path=source_path,
destination_path=destination_path,
proto_files=" ".join(protos))
runCommand(cmd)
print("{} pb2 files written".format(len(protos)))
def _writeVersionFile(self):
versionFilePath = "python/ga4gh/schemas/_protocol_version.py"
with open(versionFilePath, "w") as version_file:
version_file.write(
"# File generated by scripts/process_schemas.py; "
"do not edit\n")
version_file.write("version = '{}'\n".format(self.version))
def run(self, args):
script_path = os.path.dirname(os.path.realpath(__file__))
destination_path = os.path.realpath(
os.path.join(script_path, args.destpath))
schemas_path = os.path.realpath(args.schemapath)
protoc = self._getProtoc(destination_path)
print("Writing protocol version '{}'".format(args.version))
print("Proto files source: '{}'".format(schemas_path))
print("Rewritten proto files source: '{}'".format(destination_path))
self._createSchemaFiles(destination_path, schemas_path)
self._writePythonFiles(destination_path, protoc, destination_path)
self._writeVersionFile()
def main(args=None):
defaultDestPath = "../python/"
defaultSchemasPath = '../src/main/proto/'
parser = argparse.ArgumentParser(
description="Script to process GA4GH Protocol buffer schemas")
parser.add_argument(
"version", help="Version number of the schema we're compiling")
parser.add_argument(
"-s", "--schemapath", default=defaultSchemasPath,
help="Path to schemas (defaults to {})".format(defaultSchemasPath))
parser.add_argument(
"-d", "--destpath", default=defaultDestPath,
help=(
"the directory in which to write the compiled schema files "
"(defaults to {})".format(defaultDestPath)))
parsedArgs = parser.parse_args(args)
pb = ProtobufGenerator(parsedArgs.version)
pb.run(parsedArgs)
if __name__ == "__main__":
main()
| apache-2.0 | -7,433,448,057,057,686,000 | 37.370229 | 79 | 0.598627 | false | 4.076642 | false | false | false |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/compute/instance_groups/managed/get_named_ports.py | 1 | 3282 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""instance-groups managed get-named-ports command.
It's an alias for the instance-groups get-named-ports command.
"""
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class GetNamedPorts(instance_groups_utils.InstanceGroupGetNamedPorts):
@staticmethod
def Args(parser):
instance_groups_utils.InstanceGroupGetNamedPorts.AddArgs(
parser=parser, multizonal=False)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class GetNamedPortsAlpha(instance_groups_utils.InstanceGroupGetNamedPorts,
instance_groups_utils.InstanceGroupReferenceMixin):
@staticmethod
def Args(parser):
instance_groups_utils.InstanceGroupGetNamedPorts.AddArgs(
parser=parser, multizonal=True)
def GetResources(self, args):
"""Retrieves response with named ports."""
group_ref = self.CreateInstanceGroupReference(
name=args.name, region=args.region, zone=args.zone,
zonal_resource_type='instanceGroups',
regional_resource_type='regionInstanceGroups')
if group_ref.Collection() == 'compute.instanceGroups':
service = self.compute.instanceGroups
request = service.GetRequestType('Get')(
instanceGroup=group_ref.Name(),
zone=group_ref.zone,
project=self.project)
else:
service = self.compute.regionInstanceGroups
request = service.GetRequestType('Get')(
instanceGroup=group_ref.Name(),
region=group_ref.region,
project=self.project)
errors = []
results = list(request_helper.MakeRequests(
requests=[(service, 'Get', request)],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
return results, errors
GetNamedPorts.detailed_help = {
'brief': ('Lists the named ports for a managed instance group'),
'DESCRIPTION': """\
Named ports are key:value pairs metadata representing the service name and the
port that it's running on. Named ports can be assigned to an instance group,
which indicates that the service is available on all instances in the group.
This information is used by the HTTP Load Balancing service.
For example, to list named ports (name and port tuples) for a managed instance
group:
$ {command} example-instance-group --zone us-central1-a
The above example lists named ports assigned to an instance group named
``example-instance-group'' in the ``us-central1-a'' zone.
""",
}
GetNamedPortsAlpha.detailed_help = GetNamedPorts.detailed_help
| bsd-3-clause | 2,675,059,091,715,991,600 | 36.295455 | 78 | 0.73309 | false | 4.046856 | false | false | false |
aitmlouk/Mon_PFE_Resources | Ticket_module/ticket.py | 1 | 4247 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2012-2013 Ait Mlouk Addi (<http://aitmlouk.esy.es/>).
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class service_ticket(osv.osv):
"""(NULL)"""
_name = 'service.ticket'
_description = u'Tickets'
def create(self, cr, user, vals, context=None):
u"""mΓ©thode crΓ©er"""
if ('name' not in vals) or (vals.get('name')=='/'):
vals['name'] = self.pool.get('ir.sequence').get(cr, user, 'service.ticket')
return super(service_ticket, self).create(cr, user, vals, context)
def action_done(self, cr, uid, ids, context=None):
return self.write(cr,uid,ids,{'state' : 'done'})
def action_cancel(self, cr, uid, ids, context=None):
return self.write(cr,uid,ids,{'state' : 'cancel'})
def action_draft(self, cr, uid, ids, context=None):
return self.write(cr,uid,ids,{'state' : 'draft'})
_columns = {
'name': fields.char('Nom',size=60,required=True),
'user': fields.many2one('res.users','responsable',size=60,required=True),
'client': fields.many2one('res.partner','Client'),
'place': fields.char('Affecter a',size=60,required=True),
'detaille': fields.char('Detaille'),
'date_creation': fields.date('Date creation',required=True),
'titre_ticket': fields.char('Titre ticket'),
'contact_appelent': fields.char('Contact appelent'),
'motif': fields.text('Motif'),
'state' : fields.selection([('draft',u'En cours'),('done',u'ValidΓ©'),('cancel',u'AnnulΓ©')],u'Statut',required=True),
'date_fermiture': fields.date('Date Fermeture'),
'intervention': fields.one2many('service.intervention','ticket','intervention'),
}
_defaults = {
'state': lambda *a: 'draft',
'name': lambda self, cr, uid, context: '/',
}
def action_print(self,cr,uid,ids,context={}):
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['id'], context=context)
res = res and res[0] or {}
datas['form'] = res
if res.get('id',False):
datas['ids']=[res['id']]
return {
'type': 'ir.actions.report.xml',
'report_name': 'service.ticket',
'datas': datas,
}
service_ticket()
class service_intervention(osv.osv):
_name = 'service.intervention'
_columns = {
'client': fields.many2one('res.partner','client'),
'date_debut': fields.date('Date Debut'),
'lieu_intervention': fields.selection([('sursite','Sure Site'),('en atelier','En atelier'),('a distance','A Distance')],'Lieu intervention'),
'intervenent': fields.char('Intervenent'),
'auteur': fields.char('Auteur'),
'titre_intervention': fields.char('Titre intervention'),
'prestation': fields.text('Prestation'),
'rapport': fields.text('Rapport'),
'date_fin': fields.date('Date fin'),
'state': fields.selection([('en cours','En cours'),('traiter','Traiter'),('diagnostic','Diagnostic'),('contrat','contrat')],'Etat'),
'ticket': fields.many2one('service.ticket','titre ticket','ticket'),
}
service_intervention()
| agpl-3.0 | -4,533,865,454,226,342,400 | 40.194175 | 149 | 0.583549 | false | 3.654608 | false | false | false |
JianfengYao/thefuck | tests/rules/test_ssh_known_host.py | 1 | 2591 | import os
import pytest
from mock import Mock
from thefuck.rules.ssh_known_hosts import match, get_new_command,\
remove_offending_keys
from tests.utils import Command
@pytest.fixture
def ssh_error(tmpdir):
path = os.path.join(str(tmpdir), 'known_hosts')
def reset(path):
with open(path, 'w') as fh:
lines = [
'123.234.567.890 asdjkasjdakjsd\n'
'98.765.432.321 ejioweojwejrosj\n'
'111.222.333.444 qwepoiwqepoiss\n'
]
fh.writelines(lines)
def known_hosts(path):
with open(path, 'r') as fh:
return fh.readlines()
reset(path)
errormsg = u"""@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the RSA key sent by the remote host is
b6:cb:07:34:c0:a0:94:d3:0d:69:83:31:f4:c5:20:9b.
Please contact your system administrator.
Add correct host key in {0} to get rid of this message.
Offending RSA key in {0}:2
RSA host key for {1} has changed and you have requested strict checking.
Host key verification failed.""".format(path, '98.765.432.321')
return errormsg, path, reset, known_hosts
def test_match(ssh_error):
errormsg, _, _, _ = ssh_error
assert match(Command('ssh', stderr=errormsg), None)
assert match(Command('ssh', stderr=errormsg), None)
assert match(Command('scp something something', stderr=errormsg), None)
assert match(Command('scp something something', stderr=errormsg), None)
assert not match(Command(stderr=errormsg), None)
assert not match(Command('notssh', stderr=errormsg), None)
assert not match(Command('ssh'), None)
def test_remove_offending_keys(ssh_error):
errormsg, path, reset, known_hosts = ssh_error
command = Command('ssh user@host', stderr=errormsg)
remove_offending_keys(command, None)
expected = ['123.234.567.890 asdjkasjdakjsd\n', '111.222.333.444 qwepoiwqepoiss\n']
assert known_hosts(path) == expected
def test_get_new_command(ssh_error, monkeypatch):
errormsg, _, _, _ = ssh_error
method = Mock()
monkeypatch.setattr('thefuck.rules.ssh_known_hosts.remove_offending_keys', method)
assert get_new_command(Command('ssh user@host', stderr=errormsg), None) == 'ssh user@host'
assert method.call_count
| mit | -1,090,053,225,910,422,300 | 36.014286 | 94 | 0.652644 | false | 3.283904 | true | false | false |
crestify/crestify | crestify/services/tab.py | 1 | 1304 | from crestify.models import Tab, db
import shortuuid
import json
import datetime
def new(user_id, tabs, title):
tabs = json.loads(tabs)
new_tabs = Tab()
new_tabs.user = user_id
new_tabs.id = shortuuid.uuid()
new_tabs.title = title
new_tabs.tabs = tabs
new_tabs.added_on = datetime.datetime.utcnow()
for tab in tabs:
if tab.has_key('title') and tab.has_key('url'): # Each tab MUST have a title and a URL
pass
else:
del new_tabs
return False
db.session.add(new_tabs)
db.session.commit()
def edit_title(id, user_id, title):
try:
edit_tab = Tab.query.get(id)
except:
return False
if edit_tab:
if edit_tab.user == user_id:
edit_tab.title = title
db.session.commit()
return True
else:
return False
else:
return False
def delete(id, user_id):
try:
delete_tabs = Tab.query.get(id)
except:
print "exception"
return False
if delete_tabs:
if delete_tabs.user == user_id:
db.session.delete(delete_tabs)
db.session.commit()
print "deleted"
return True
else:
return False
else:
return False
| bsd-3-clause | -5,377,935,641,667,188,000 | 22.285714 | 95 | 0.553681 | false | 3.73639 | false | false | false |
ylcolala/yelp-image-classification | yelpimages/yelpimages/spiders/yelp_spider.py | 2 | 1975 | import scrapy
import json
from yelpimages.items import YelpimagesItem
class DemoSpider(scrapy.Spider):
name = "yelpdemo"
allowed_domains = ["yelp.com"]
base_url = "http://www.yelp.com/biz_photos/"
start_urls = []
next_page = ["?start=100", "?start=200", "?start=300"]
tacodeli = ["tacodeli-austin-3", "tacodeli-austin-4",
"tacodeli-austin-6", "tacodeli-austin-11",
"tacodeli-west-lake-hills"]
for each in tacodeli:
curr_url = base_url + each
next_url = curr_url + next_page[0]
start_urls.append(curr_url)
start_urls.append(next_url)
torchys = "torchys-tacos-austin"
start_urls.append(base_url + torchys)
for i in range(0, len(next_page)):
start_urls.append(base_url + torchys + next_page[i])
index = [3, 4, 6, 7, 10, 11, 12, 13]
for i in range(0, len(index)):
curr_url = base_url + torchys + '-' + str(index[i])
next_url = curr_url + next_page[0]
start_urls.append(curr_url)
start_urls.append(next_url)
# print start_urls
"""
rules = ( Rule (SgmlLinkExtractor(restrict_xpaths=('//a[@class="page-option available-number"]/@href',)), follow= True),
Rule (SgmlLinkExtractor(restrict_xpaths=('//div[@class="foto_imovel"]',)), callback='parse')
)
"""
def parse(self, response):
item = YelpimagesItem()
item['image_urls'] = []
"""
page_info = response.xpath('//div[@class="page-of-pages arrange_unit arrange_unit--fill"]/text()').extract()[0].strip()
# u'Page 1 of 2'
index = page_info.rfind('f')
page_nums = int(page_info[i+2:])
"""
urls = response.xpath('//div[@class="photo-box biz-photo-box pb-ms"]/a/img/@src').extract()
for url in urls:
k = url.rfind('/')
url = url[:k] + '/ls.jpg'
item['image_urls'].append(url)
yield item
| mit | -4,021,232,041,586,784,000 | 31.377049 | 127 | 0.560506 | false | 3.144904 | false | false | false |
wenbin5/Benson | _helper.py | 1 | 3871 | """
_helper.py
helper mehtods and other stuff for modules to import.
"""
__all__ = [
'QUADRANT1', 'QUADRANT2', 'QUADRANT3', 'QUADRANT4',
'QUADRANTS', 'set_quadrant', 'set_quadrants', 'speak',
]
import os
from gtts import gTTS
from sense_hat import SenseHat
ALPHANUMERICS = {
'0': (2, 5, 7, 9, 11, 14),
'1': (1, 2, 6, 10, 13, 14, 15),
'2': (1, 2, 3, 6, 7, 9, 13, 14, 15),
'3': (1, 2, 3, 6, 7, 11, 13, 14, 15),
'4': (1, 3, 5, 7, 9, 10, 11, 15),
'5': (1, 2, 3, 5, 6, 11, 13, 14, 15),
'6': (1, 2, 3, 5, 9, 10, 11, 13, 14, 15),
'7': (1, 2, 3, 7, 10, 13),
'8': (1, 2, 3, 5, 6, 7, 9, 11, 13, 14, 15),
'9': (1, 2, 3, 5, 6, 7, 11, 13, 14, 15),
'a': (2, 5, 7, 9, 10, 11, 13, 15),
'b': (1, 2, 5, 6, 9, 10, 11, 13, 14, 15),
'c': (1, 2, 3, 5, 9, 13, 14, 15),
'd': (1, 2, 5, 7, 9, 11, 13, 14),
'e': (1, 2, 3, 5, 6, 9, 13, 14, 15),
'f': (1, 2, 3, 5, 9, 10, 11, 13),
'g': (1, 2, 5, 9, 10, 11, 13, 14),
'h': (1, 3, 5, 7, 9, 10, 11, 13, 15),
'i': (2, 6, 10, 14),
'j': (3, 7, 9, 11, 13, 14),
'k': (1, 3, 5, 6, 9, 10, 13, 15),
'l': (1, 5, 9, 13, 14, 15),
'm': (1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 15),
'n': (1, 2, 3, 5, 7, 9, 11, 13, 15),
'o': (1, 2, 3, 5, 7, 9, 11, 13, 14, 15),
'p': (1, 2, 5, 7, 9, 10, 13),
'q': (1, 2, 3, 5, 7, 9, 10, 11, 15),
'r': (1, 2, 5, 7, 9, 10, 13, 15),
's': (2, 3, 5, 6, 11, 13, 14),
't': (1, 2, 3, 6, 10, 14),
'u': (1, 3, 5, 7, 9, 11, 13, 14, 15),
'v': (1, 3, 5, 7, 9, 11, 14),
'w': (1, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15),
'x': (1, 3, 5, 7, 10, 13, 15),
'y': (1, 3, 5, 7, 10, 14),
'z': (1, 2, 3, 6, 9, 13, 14, 15)
}
QUADRANT1 = (
(0, 0), (1, 0), (2, 0), (3, 0),
(0, 1), (1, 1), (2, 1), (3, 1),
(0, 2), (1, 2), (2, 2), (3, 2),
(0, 3), (1, 3), (2, 3), (3, 3)
)
QUADRANT2 = (
(4, 0), (5, 0), (6, 0), (7, 0),
(4, 1), (5, 1), (6, 1), (7, 1),
(4, 2), (5, 2), (6, 2), (7, 2),
(4, 3), (5, 3), (6, 3), (7, 3)
)
QUADRANT3 = (
(0, 4), (1, 4), (2, 4), (3, 4),
(0, 5), (1, 5), (2, 5), (3, 5),
(0, 6), (1, 6), (2, 6), (3, 6),
(0, 7), (1, 7), (2, 7), (3, 7)
)
QUADRANT4 = (
(4, 4), (5, 4), (6, 4), (7, 4),
(4, 5), (5, 5), (6, 5), (7, 5),
(4, 6), (5, 6), (6, 6), (7, 6),
(4, 7), (5, 7), (6, 7), (7, 7)
)
QUADRANTS = (QUADRANT1, QUADRANT2, QUADRANT3, QUADRANT4)
sense = SenseHat()
def set_quadrant(quadrant, character, color):
"""Set a given quadrant to show a given character."""
for index, coordinate_value in enumerate(quadrant):
x, y = coordinate_value
if index in ALPHANUMERICS[character.lower()]:
pixel_color = color
else:
pixel_color = (0, 0, 0)
sense.rotation = 90
sense.set_pixel(x, y, pixel_color)
return None
def set_quadrants(quadrants, characters, color):
"""Set all 4 QUADRANTs to show the given characters."""
for q, c in zip(quadrants, characters):
set_quadrant(q, c, color)
return None
def speak(name='tts', msg=''):
"""Speak the message given using google text to speech API."""
vlc_command = 'cvlc --play-and-exit '
file_path = 'Benson/audio/'
file_name = name + '.mp3'
play_audio_command = vlc_command + file_path + file_name
if msg:
audio = gTTS(text=msg, lang='en')
audio.save('./' + file_path + file_name)
os.system(play_audio_command)
return None
def get_random_number(modulo):
"""Generate a random number using sense hat sensor data."""
humidity = sense.get_humidity()
temperature = sense.get_temperature()
pressure = sense.get_pressure()
ambient_sum = humidity + temperature + pressure
x, y = str(ambient_sum).split('.')
ambient_random = int(y) % int(x)
python_random = ord(os.urandom(1))
return (ambient_random + python_random) % modulo | mit | -2,225,324,213,157,696,300 | 28.557252 | 66 | 0.461121 | false | 2.233699 | false | false | false |
sonicpp/Dune-game-translations | utils/font.py | 1 | 4878 | #!/usr/bin/env python3
"""
font.py - font util for DUNECHAR file (extracted from DUNECHAR.HSQ)
this util can:
- dump current DUNECHAR file into image
- load new character font from image into DUNECHAR file
"""
import sys
import argparse
from PIL import Image
__author__ = "Jan Havran"
DUMP_WIDTH = 16
DUMP_HEIGHT = 16
DUMP_ROW_WIDTH = 8
DUMP_ROW_HEIGHT = 10
CHAR_TABLE_CNT = DUMP_WIDTH * DUMP_HEIGHT
CHAR_UPP_TABLE_CNT = 128
CHAR_LOW_TABLE_CNT = CHAR_TABLE_CNT - CHAR_UPP_TABLE_CNT
CHAR_UPP_HEIGHT = 9
CHAR_LOW_HEIGHT = 7
FILE_TOTAL_SIZE = CHAR_TABLE_CNT + \
CHAR_UPP_TABLE_CNT * CHAR_UPP_HEIGHT + \
CHAR_LOW_TABLE_CNT * CHAR_LOW_HEIGHT
col_black = (0x00, 0x00, 0x00)
col_red = (0xD0, 0xA0, 0xA0)
col_green = (0xA0, 0xD0, 0xA0)
class VerifyLoad(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if args.dump is not None:
parser.error('--load should not be used with --dump')
setattr(args, self.dest, values)
class VerifyDump(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if args.load is not None:
parser.error('--dump should not be used with --load')
setattr(args, self.dest, values)
def dump(data, out_img):
mask = [0] * CHAR_TABLE_CNT
avail = list(range(0x15, 0x40)) + list(range(0x41, 0x60)) + \
list(range(0x61, 0x80))
for char in avail:
mask[char] = 1
base = CHAR_TABLE_CNT
for char in range(0, CHAR_TABLE_CNT):
width = data[char]
is_upper = char < CHAR_UPP_TABLE_CNT
height = CHAR_UPP_HEIGHT if is_upper else CHAR_LOW_HEIGHT
color = col_red if mask[char] == 0 else col_green
# Draw cell
for y in range(0, height):
row = data[base + y]
# Draw row (background)
for x in range(0, min(width, 8)):
pos_x = (char % DUMP_WIDTH) * \
DUMP_ROW_WIDTH + x
pos_y = (char // DUMP_HEIGHT) * \
DUMP_ROW_HEIGHT + y
img.putpixel((pos_x, pos_y), color)
# Draw row (character)
for x in range(0,8):
if (row & 0b10000000):
pos_x = (char % DUMP_WIDTH) * \
DUMP_ROW_WIDTH + x
pos_y = (char // DUMP_HEIGHT) * \
DUMP_ROW_HEIGHT + y
img.putpixel((pos_x, pos_y), col_black)
row = row << 1
base += height
def load(data, img, pos, out_file):
if img.width == 0 or img.width > 8:
sys.stderr.write("Wrong image width\n")
return 1
if pos < 0 or pos >= CHAR_TABLE_CNT:
sys.stderr.write("Wrong character position (out of table)\n")
return 1
if pos < CHAR_UPP_TABLE_CNT and img.height != CHAR_UPP_HEIGHT:
sys.stderr.write("Wrong image height (must be exactly " \
+ CHAR_UPP_HEIGHT + ")\n")
return 1
elif pos >= CHAR_UPP_TABLE_CNT and img.height != CHAR_LOW_HEIGHT:
sys.stderr.write("Wrong image height (must be exactly " \
+ CHAR_LOW_HEIGHT + ")\n")
return 1
data_width = []
data_width.append(img.width)
# Write char width
out_file.write(data[:pos] + bytes(data_width) + \
data[pos + 1:CHAR_TABLE_CNT])
base = CHAR_TABLE_CNT + \
min(pos, CHAR_UPP_TABLE_CNT) * CHAR_UPP_HEIGHT + \
max(pos - CHAR_UPP_TABLE_CNT, 0) * CHAR_LOW_HEIGHT
data_font = []
char_height = CHAR_UPP_HEIGHT if pos < CHAR_UPP_TABLE_CNT else \
CHAR_LOW_HEIGHT
# Encode character
for y in range(0, char_height):
row = data[base + y]
byte = 0
for x in range(0, 8):
if img.getpixel((x, y)) == col_black:
byte = byte | (1 << (7 - x))
data_font.append(byte)
# Write encoded chars
out_file.write(data[CHAR_TABLE_CNT:base] + bytes(data_font) + \
data[base + char_height:])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('DUNECHAR', help='file extracted from DUNECHAR.HSQ (stdin default)', nargs='?')
parser.add_argument('--dump', help='dump DUNECHAR as image and print it to output', action=VerifyDump)
parser.add_argument('--load', help='load character raster image into new DUNECHAR file', action=VerifyLoad)
parser.add_argument('--position', help='specify character position in DUNECHAR (0-255) for image load', type=int, default=20)
parser.add_argument('--output', help='specify output file (stdout default)')
args = parser.parse_args()
# Input file
if args.DUNECHAR:
in_file = open(args.DUNECHAR, 'rb')
else:
in_file = sys.stdin.buffer
data = in_file.read()
if args.DUNECHAR:
in_file.close()
if type(data) != bytes:
sys.stderr.write("Wrong data type\n")
sys.exit(1)
if len(data) != FILE_TOTAL_SIZE:
sys.stderr.write("Wrong file size of DUNECHAR")
sys.exit(1)
# Output file
if args.output:
out_file = open(args.output, 'wb')
else:
out_file = sys.stdout.buffer
if args.dump:
img = Image.new('RGB', (DUMP_WIDTH * DUMP_ROW_WIDTH, \
DUMP_HEIGHT * DUMP_ROW_HEIGHT), 'white')
dump(data, img)
img.save(out_file, 'PNG')
if args.load:
img = Image.open(args.load)
load(data, img, args.position, out_file)
if args.output:
out_file.close()
| gpl-2.0 | -7,565,710,109,822,217,000 | 27.196532 | 126 | 0.651087 | false | 2.665574 | false | false | false |
mantidproject/mantid | scripts/Interface/ui/drill/presenter/DrillExportPresenter.py | 3 | 1586 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
class DrillExportPresenter:
"""
Export view.
"""
_view = None
"""
Export model.
"""
_model = None
def __init__(self, view, model):
"""
Create the export presenter.
Args:
view (DrillExportDialog): export view
model (DrillExportModel): export model
"""
self._view = view
self._model = model
self._view.setPresenter(self)
algorithms = self._model.getAlgorithms()
extensions = self._model.getAlgorithmExtentions()
tooltips = self._model.getAlgorithmDocs()
self._view.setAlgorithms(algorithms, extensions, tooltips)
states = dict()
for a in algorithms:
states[a] = self._model.isAlgorithmActivated(a)
self._view.setAlgorithmCheckStates(states)
self._view.accepted.connect(self.onAccept)
self._view.show()
def onAccept(self):
"""
Triggered when the view has been validated. This method saves the
activation state of each algorithm in the model.
"""
states = self._view.getAlgorithmCheckStates()
for a,s in states.items():
if s:
self._model.activateAlgorithm(a)
else:
self._model.inactivateAlgorithm(a)
| gpl-3.0 | -2,078,911,663,853,081,900 | 28.924528 | 73 | 0.602144 | false | 4.035623 | false | false | false |
unioslo/cerebrum | Cerebrum/modules/no/uio/voip/voipService.py | 1 | 6129 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2010 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This module implements voip_service functionality for voip.
Each voip_address is either personal or non-personal. The latter category is
meant to cover situations like phones in elevators, meeting rooms, etc. These
'non-personal' locations are represented by voip_service in voip; so that a
voip_address is bound either to a person or to a voip_service entity.
"""
import cereconf
from Cerebrum.modules.EntityTrait import EntityTrait
from Cerebrum.Entity import EntityContactInfo
from Cerebrum.Utils import argument_to_sql
class VoipService(EntityTrait, EntityContactInfo):
"""voip_service interface.
"""
__read_attr__ = ("__in_db",)
__write_attr__ = ("description",
"service_type",
"ou_id")
def clear(self):
self.__super.clear()
self.clear_class(VoipService)
self.__updated = list()
# end clear
def populate(self, description, service_type, ou_id):
"""Create a new VoipService instance in memory.
"""
EntityTrait.populate(self, self.const.entity_voip_service)
try:
if not self.__in_db:
raise RuntimeError("populate() called multiple times.")
except AttributeError:
self.__in_db = False
self.description = description
self.service_type = self.const.VoipServiceTypeCode(int(service_type))
self.ou_id = int(ou_id)
# end populate
def write_db(self):
"""Synchronise the object in memory with the database."""
self.__super.write_db()
if not self.__updated:
return
is_new = not self.__in_db
binds = {"entity_type": int(self.const.entity_voip_service),
"entity_id": int(self.entity_id),
"description": self.description,
"service_type": int(self.const.VoipServiceTypeCode(self.service_type)),
"ou_id": int(self.ou_id),}
if is_new:
self.execute("""
INSERT INTO [:table schema=cerebrum name=voip_service]
VALUES (:entity_type, :entity_id, :description,
:service_type, :ou_id)
""", binds)
else:
self.execute("""
UPDATE [:table schema=cerebrum name=voip_service]
SET description = :description,
service_type = :service_type,
ou_id = :ou_id
WHERE entity_id = :entity_id
""", binds)
del self.__in_db
self.__in_db = True
self.__updated = list()
return is_new
# end write_db
def delete(self):
"""Remove a specified entry from the voip_service table."""
if self.__in_db:
self.execute("""
DELETE FROM [:table schema=cerebrum name=voip_service]
WHERE entity_id = :entity_id
""", {"entity_id": int(self.entity_id)})
self.__super.delete()
# end delete
def find(self, entity_id):
"""Locate VoipService by its entity_id."""
self.__super.find(entity_id)
(self.description,
self.service_type,
self.ou_id) = self.query_1("""
SELECT description, service_type, ou_id
FROM [:table schema=cerebrum name=voip_service]
WHERE entity_id = :entity_id
""", {"entity_id": int(self.entity_id)})
self.__in_db = True
# end find
def search(self, entity_id=None, description=None, service_type=None,
ou_id=None):
"""Search for voip_services matching the filtering criteria."""
where = list()
binds = dict()
if entity_id is not None:
where.append(argument_to_sql(description, "vs.entity_id", binds,
int))
if description is not None:
where.append(argument_to_sql(description, "vs.description", binds))
if service_type is not None:
where.append(argument_to_sql(service_type, "vs.service_type",
binds, int))
if ou_id is not None:
where.append(argument_to_sql(ou_id, "vs.ou_id", binds, int))
if where:
where = "WHERE " + " AND ".join(where)
else:
where = ""
return self.query("""
SELECT entity_id, description, service_type, ou_id
FROM [:table schema=cerebrum name=voip_service] vs
""" + where, binds)
# end search
def search_voip_service_by_description(self, description, exact_match=False):
"""Locate voip_service by its description.
The match could be either exact, or approximate (default). In the
latter case, a substring search on the description is performed.
"""
if exact_match:
where = "description = :description"
binds = {"description": description}
else:
where = "description LIKE :description"
binds = {"description": "%" + description + "%"}
return self.query("""
SELECT entity_id, description, service_type, ou_id
FROM [:table schema=cerebrum name=voip_service]
WHERE """ + where, binds)
# end search_voip_service_by_description
# end voipService
| gpl-2.0 | -7,525,202,465,252,237,000 | 31.089005 | 88 | 0.593082 | false | 3.951644 | false | false | false |
PhoenixBureau/PigeonComputer | pigeon/xerblin/world.py | 1 | 12211 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Simon Forman
#
# This file is part of Pigeon Computer.
#
# Pigeon Computer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pigeon Computer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pigeon Computer. If not, see <http://www.gnu.org/licenses/>.
#
'''
World
=================================
This module defines two classes: Serializer and World. Serializer deals
with saving history protocols to a pickle file and World deals with
interacting with an interpreter in a loop.
This module also defines a subclass of World called HistoryListWorld that
tracks history in a list object, and a couple of very basic views.
Starting with an initial interpreter (comprised of a stack of nominal
data and a dictionary of commands and possibly named state "variables")
the user issues a command to the interpreter which then results in a new
state (which can be the same as the initial state.)
The interpreter is embedded in a frame (a World or subclass) which
provides view and history and serialization controls.
These frame "meta" controls are several:
Change the view function.
^^^^^^^^^^^^^^^^^^^^^^^^^
View functions take a state (interpreter, stack and dictionary) and
render it somehow for the user.
Implied but not necessary are means for direct manipulation of views
to send commands to the interpreter. It may well turn out that tight
feedback loops between views and modeled states will require a
relaxation of the strict functional style I'm striving for here.
In any event, views take a state and render it.
Views may render as text in a terminal or GUI text widget, "direct"
graphics via Pygame, etc., GUI systems such as Tkinter or wxPython,
document formats like Postscript or PDF, graphs using matplotlib or
other libraries in formats such as PNG or SVG, or web UIs using HTML,
CSS, Javascript, and perhaps even ActionScript (Flash).
Examine history and change to previous states.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The chief advantage of storing all history is in the ease of going
back and trying something new from a previous point in time.
This requires some means of indexing and displaying the states in
your history and your path through them so you can decide when to go
back to, and a way to actually go back and set your current state to
a previous state and add the transition to the history.
This seems to require another kind of view that can display not just
one state time-slice of your history, but also the tree-like path
from state to state you took.
(There is an infinite regression here: do you keep the history of the
changing of the history? What about the history of the keeping of
that history, do you keep that too? And that keeping's history?
Etc... In practice we are likely to make do with only one level of
history.)
In addition to simply stepping back and forth in time, we will also
want to "pull" data from different history states, possibly in
multiple stored pickle files, and combine them in a new synthesized
states. We can build new interpreters with the data and commands we
need to perform tasks and achieve our goals.
As yet, this script provides only indirect means to manipulate and view
history. You can use a HistoryListWorld and manipulate its history
attribute, which is a list containing all the states of the system in
order starting with the initial state.
Manipulate external save files.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The histories must be stored in python pickle files in some external
media (i.e. a disk drive or memory stick) and there are issues of
selecting and loading histories and managing the relations between
them.
Currently this script provides no direct means to do any of this.
You can pass a file name or open file object to the World as its
save_file argument and that will be used to save the pickle data as
you use the World object, but other than that you're on your own.
Code Documentation
^^^^^^^^^^^^^^^^^^
'''
import pickle, pprint, StringIO, os
from pigeon.xerblin.btree import fill_tree, items
from pigeon.xerblin.library import words
from pigeon.xerblin.base import interpret
def view0(I):
'''Print the stack to stdout with python's default formatting.'''
print repr(I[0])
print
def view1(I):
'''Print the stack to stdout using the pprint module.'''
pprint.pprint(I[0])
print
def nullView(I):
'''"Do nothing" view.'''
pass
# An initial interpreter to use for spawning worlds. It has an empty
# stack and whatever commands are defined by default in the library
# module.
ROOT = (), fill_tree((), words)
class World(object):
'''
Manage an interpreter, a view function, and serialization to a file
or file-like object.
This object takes a view function (any callable that accepts an
interpreter) and optionally an initial interpreter and a save file.
It creates a Serializer object to save commands and results, and it
provides a step() method that accepts a command (list of strings) and
runs it on the interpreter then saves it and calls the view function.
'''
def __init__(self, view=nullView, initial=None, save_file=None):
'''
Create a World object with the given view function.
Keyword arguments:
initial -- An interpreter to use as the initial state of the
system. It defaults to ROOT.
save_file -- A file or file-like object open for writing, or a
string file name which is then opened for writing. If not
given a StringIO object is used.
'''
if initial is None:
initial = ROOT
if save_file is None:
save_file = StringIO.StringIO()
elif isinstance(save_file, basestring):
save_file = open(save_file, 'w')
# If save_file isn't None or a string or unicode object then we
# assume it's a file-like object suitable for the Pickler.
assert hasattr(save_file, 'write'), repr(save_file)
self.serializer = Serializer(initial, save_file)
self._view = view
self.setCurrentState(initial)
def view(self, state):
self._view(state)
def step(self, command):
'''
Run one command, a list of strings, on the interpreter then save
the command and resulting new interpreter to the serialized
stream and call the view function on the new interpreter.
'''
I = self.getCurrentState()
# Run the command.
I = interpret(I, command)
# Record the resultant state.
self.setCurrentState(I)
# Save the command and its resultant state in the serializer. The
# history list in the HistoryListWorld subclass serves as a sort
# of cache on the contents of the serializer pickle stream. (The
# Pickler object's memo dict essentially keeps this cache for us
# but I don't want to introduce a bunch of tight coupling with
# the pickle medule, despite the fact that it is likely to be
# pretty stable.)
self.serializer.post(command, I)
def changeView(self, view):
'''
Swap the current view function for the one passed, return the old
view function. Calls the new view.
'''
view, self._view = self._view, view
self.view(self.getCurrentState())
return view
def setCurrentState(self, state):
'''
Sets current state to the passed state. This method exists
mostly to be overridden in subclasses.
'''
self.current = state
# Render the view.
self.view(state)
def getCurrentState(self):
'''
Return the current state. This method exists mostly to be
overridden in subclasses.
'''
return self.current
class HistoryListWorld(World):
'''
A subclass of World that overrides the setCurrentState() and
getCurrentState() methods to record states in a history list.
'''
def setCurrentState(self, state):
'''
Set current state to passed in state. Overrides super-class.
'''
try:
history = self.history
except AttributeError:
history = self.history = []
history.append(state)
self.view(state)
def getCurrentState(self):
'''
Return the current state. Overrides super-class.
'''
# Current state is just the most recent history.
return self.history[-1]
class Serializer:
'''
Combines a Pickler and a file or file-like object to track a linear
protocol of state -> command -> resultant-state history.
You instantiate it with an initial state (i.e. your "root" stack and
dictionary) and a file(-like object) to save to then you call post()
repeatedly with the next command and the resultant stack and
dictionary.
The Pickler keeps a memo dict of the objects it has seen and pickled
already and when it sees them again it serializes a reference to them
rather than the whole object, automatically providing a sort of
compression for the persistant datastructures we're storing in the
pickle stream.
You can open the pickle file and call load() repeatedly to get a
sequence of state, command, state, ... There will always be an odd
number of stored data: the initial state followed by zero or more
pairs of command and result state.
'''
def __init__(self, initial_state, stream):
self.stream = stream
self._setup_flushers(stream)
self.pickler = pickle.Pickler(stream)
self.pickler.dump(initial_state)
self.flush()
def post(self, command, resultant_state):
'''
Serialize command and resultant_state to the pickle stream. Each
is saved separately so you can load a command from the stream and
examine it without loading its resultant state.
'''
self.pickler.dump(command)
self.pickler.dump(resultant_state)
self.flush()
def flush(self):
for f in self._flushers:
f()
def _setup_flushers(self, stream):
self._flushers = []
if hasattr(stream, 'flush'):
self._flushers.append(stream.flush)
if hasattr(stream, 'fileno'):
self._flushers.append(lambda n=stream.fileno(): os.fsync(n))
# This is a proof-of-concept frame for interacting with
# a xerblin interpreter.
if __name__ == "__main__":
from xerblin.btree import items
# Create a world with the basic view function.
w = HistoryListWorld(view0)
# For convenience print out the commands in the dictionary at
# startup.
dictionary = w.getCurrentState()[1]
print ' '.join(name for name, value in items(dictionary))
print
# Drop into an event loop.
while True:
try:
# Get a command and split it up.
command = raw_input('> ').split()
except EOFError:
# User is done, quit.
break
# Run the command, the World object handles all the details for
# us.
w.step(command)
##
## Example run:
##
## ()
##
## NewBranchWord NewLoopWord NewSeqWord add drop dup inscribe listwords
## lookup mul over pick pickle rebalance sub swap tuck unpickle view
##
## > 23 18
## (18, (23, ()))
##
## > over
## (23, (18, (23, ())))
##
## > over
## (18, (23, (18, (23, ()))))
##
## > swap
## (23, (18, (18, (23, ()))))
##
## > sub
## (-5, (18, (23, ())))
##
## > drop
## (18, (23, ()))
##
## > over over sub
## (5, (18, (23, ())))
##
## > drop drop drop
## ()
##
## >
##
##
| gpl-3.0 | 9,156,978,142,784,432,000 | 31.473404 | 77 | 0.669451 | false | 4.168658 | false | false | false |
martazaryn/green-roofs-mappping | Green_Roof_MapPy/warsaw/migrations/0003_populate.py | 1 | 1460 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from warsaw.models import City, District
def populate(apps, schema_editor):
warsaw = City.objects.create(name="Warsaw")
District.objects.create(name="Wola", city=warsaw)
District.objects.create(name="ΕrΓ³dmieΕcie", city=warsaw)
District.objects.create(name="MokotΓ³w", city=warsaw)
District.objects.create(name="Ursus", city=warsaw)
District.objects.create(name="Bemowo", city=warsaw)
District.objects.create(name="Wawer", city=warsaw)
District.objects.create(name="Ochota", city=warsaw)
District.objects.create(name="WΕochy", city=warsaw)
District.objects.create(name="WesoΕa", city=warsaw)
District.objects.create(name="Bielany", city=warsaw)
District.objects.create(name="UrsynΓ³w", city=warsaw)
District.objects.create(name="WilanΓ³w", city=warsaw)
District.objects.create(name="Ε»oliborz", city=warsaw)
District.objects.create(name="TargΓ³wek", city=warsaw)
District.objects.create(name="BiaΕoΕΔka", city=warsaw)
District.objects.create(name="RembertΓ³w", city=warsaw)
District.objects.create(name="Praga PΓ³Εnoc", city=warsaw)
District.objects.create(name="Praga PoΕudnie", city=warsaw)
class Migration(migrations.Migration):
dependencies = [
('warsaw', '0002_auto_20170405_1252'),
]
operations = [
migrations.RunPython(populate),
]
| mit | 652,288,227,889,083,900 | 36.973684 | 63 | 0.717949 | false | 2.702247 | false | false | false |
FrodeSolheim/fs-uae-launcher | launcher/settings/monitorbutton.py | 1 | 2603 | from fsbc.application import app
from fsui import Image, ImageButton
from launcher.i18n import gettext
from launcher.launcher_settings import LauncherSettings
class MonitorButtonBase:
def __init__(self, parent, icons):
super().__init__(parent, icons)
self.icons = icons
self.monitor = ""
self.tooltip_text = gettext(
"Monitor to display the emulator on (left, "
"middle-left, middle-right, right)"
)
# self.on_setting("fullscreen", app.settings["fullscreen"], initial=True)
self.on_setting("monitor", app.settings["monitor"], initial=True)
LauncherSettings.add_listener(self)
def on_activate(self):
if self.monitor == "left":
app.settings["monitor"] = ""
elif self.monitor == "middle-left":
app.settings["monitor"] = "middle-right"
elif self.monitor == "middle-right":
app.settings["monitor"] = "right"
else:
app.settings["monitor"] = "left"
def onDestroy(self):
LauncherSettings.remove_listener(self)
super().onDestroy()
def on_setting(self, key, value, initial=False):
if key == "fullscreen":
# self.set_enabled(value == "1")
pass
elif key == "monitor":
if value == "left":
self.monitor = "left"
self.__set_image(self.icons[0])
elif value == "middle-right":
self.monitor = "middle-right"
self.__set_image(self.icons[2])
elif value == "right":
self.monitor = "right"
self.__set_image(self.icons[3])
else:
self.monitor = "middle-left"
self.__set_image(self.icons[1])
def __set_image(self, image, initial=False):
self.image = image
if not initial:
# pylint: disable=no-member
self.set_image(image)
class ButtonWrapper(ImageButton):
def __init__(self, parent, icons):
super().__init__(parent, icons[0])
class MonitorButton(MonitorButtonBase, ButtonWrapper):
def __init__(self, parent):
super().__init__(
parent,
[
Image("launcher:/data/16x16/monitor_left.png"),
Image("launcher:/data/16x16/monitor_middle_left.png"),
Image("launcher:/data/16x16/monitor_middle_right.png"),
Image("launcher:/data/16x16/monitor_right.png"),
],
)
self.set_tooltip(self.tooltip_text)
self.set_min_width(40)
| gpl-2.0 | 902,271,721,908,237,000 | 32.805195 | 81 | 0.552824 | false | 4.029412 | false | false | false |
gdietz/OpenMEE | interaction.py | 1 | 1181 | '''
Created on Jan 11, 2014
@author: George
'''
class Interaction:
def __init__(self, list_of_vars):
self.combination = set(list_of_vars)
def __str__(self):
covs_in_order = sorted(self.combination, key=lambda cov: cov.get_label())
cov_labels = [cov.get_label() for cov in covs_in_order]
return "*".join(cov_labels)
def __eq__(self, otherInteraction):
return self.combination == otherInteraction.combination
def __contains__(self, var):
return var in self.combination
def get_vars(self):
# sorted list of the interaction covariates (alphabetically)
return sorted(list(self.combination), key=lambda x: x.get_label())
def r_colon_name(self):
# name of the interaction with colon separtion (for putting in to a model formula in R)
# e.g. A:B
return ":".join([cov.get_label_R_compatible() for cov in self.get_vars()])
def r_vector_of_covs(self):
# R code for string vector of covs
# e.g. c("A","B")
quoted_names = ["\"%s\"" % cov for cov in self.get_vars()]
return "c(%s)" % ", ".join(quoted_names) | gpl-3.0 | -66,774,486,340,107,630 | 32.771429 | 95 | 0.590178 | false | 3.514881 | false | false | false |
Davenport-Physics/RelaxAuto | Src/main.py | 1 | 9214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2014 Michael Davenport <Davenport.physics@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from auto import *
from calls import *
from time import sleep
import threading
import os
HaltExecution = False
## @package main
#
# main file where the automation takes place. Usually this code
# will not be modified, and should not be modified unless you plan
# on editing the automation of the program itself.
"""
The main function doesn't have a lot of code to itself, and the reason
for this is because I wanted a program that could be applicable in
different ways, and populating the main function with commands would result
in some inherent difficulties with applying the script to different applications.
So the main function will normally just initialize an object and call a function.
It's a good function to use if you want to test any of the functions throughout
the script, to weed out bugs.
Aside that, by default it initializes an auto object, and calls the
run_automation function, passing a reference (memory address) to the auto object it
initialized
"""
def main():
obj = Auto()
t = threading.Thread(target=get_input)
t.start()
run_automation(obj)
os._exit(0)
return 0
"""
Handles user input, which allows the user to halt execution of the
program during runtime. Originally, the only way to exit the program
was to kill the task, which would require a second terminal to be opened.
It has support for both python 2.x and 3.x
"""
def get_input():
global HaltExecution
while HaltExecution == False:
try:
user = raw_input()
except:
user = str(input())
if user == "q":
HaltExecution = True
raise SystemExit
"""
run_automation will begin by determining whether the user wanted
the program to print out information in a verbose manner, hence the use
of the Verbose variable. It does this by calling get_attribute_by_name, and
passing "verbose" to it. get_attribute_by_name will search for an object
that has that name, and return a attribute of that object.
Then the program initializes an object Filename, which gets a single string
or a list of files to be deleted. This is dependent on what the
user provides in the autoinit config file. One object is delete_file_strict
which tells the program that if it sees a file that is verbatim that string
it should delete it. There is a list determined by the delete_file_which_contains
string in the config file, which returns a list of any file that has a portion
of that string in it's filename
PreviousVolume is a object that is initialized to 0.0. It's used to determine
whether an "pseudo-error" has happened. I say pseudo-error, because it's
not an error in the traditional sense, yet it makes the same function
call as if there was an error. The same action happens. mv CONTCAR POSCAR
So the error is determined by whether the previous volume and the current
volume are the same. If they are, then there is a "pseudo-error".
The code then goes into a for loop, which runs a maximum of iterations
defined by the value in the max_iterations object. By default it's 10, if
the config file failed to find a max_iterations attribute
the for loop initially makes a function call to make_bsub_job, which
calls the command line command "bsub<job" which submit's a job to the
computer cluster.
In while loop, it checks to see if the job is done. This is determined
by making a function call to check_if_job_finished, which determines
whether the queue has a string that is verbatim to the username
defined in the config file. The function returns false, if it finds that
the queue still has a string with the user's username.
The program then checks for errors, by making a function call to check_for_errors.
Afterwards it deletes any files that the user has specified in the config file.
The program is not required to delete any files.
At the very end, it checks to volume difference and whether the program had
any errors. In the event that there wasn't any errors, and the volume difference
is less than or equal to what the user defined in the config file, the
code will break and the program will halt execution, informing the user
that the automated relaxation was finished.
"""
## The primary function for running the volume automation logic
#
# @param Auto Object variable
#
# This function should only ever be called once during the entire
# execution of the program.
def run_automation(obj):
global HaltExecution
Verbose = obj.get_attribute_by_name("verbose")
#Filename will hold the string of file/s to be deleted after
#each iteration
Filename = obj.get_files_to_be_deleted()
PreviousVolume = 0.0
for x in range(obj.get_attribute_by_name("max_iterations")):
make_bsub_job(obj.get_attribute_by_name("jobfile"))
if Verbose == True:
print("Created Job, waiting to finish.")
interval = 0
#Waits for the job to be finished
while obj.check_if_job_finished() == False:
if HaltExecution == True:
raise SystemExit
sleep(1)
interval += 1
if Verbose == True and interval % 20 == 0:
print("%d seconds have passed" % (interval))
if Verbose == True:
print("Job is finished")
HadErrors = check_for_errors(obj)
if obj.get_delete_type() == 1:
delete_file(Filename)
elif obj.get_delete_type() == 2:
if delete_file_which_contains_string(Filename) == True and Verbose == True:
print("Successfully deleted files which contained the string %s" % (Filename))
#Once the job is finished, it checks the minimum volume difference
#If the difference has been met, it breaks the for loop.
if check_volume_difference(obj) == True and HadErrors == False:
break;
if get_volume_difference(get_volumes(obj)) == PreviousVolume:
if obj.get_attribute_by_name("verbose") == True:
print("Running command " + obj.get_attribute_by_name("do_when_error") + " because volume difference did not change")
make_call_with_string(obj.get_attribute_by_name("do_when_error"))
PreviousVolume = get_volume_difference(get_volumes(obj))
print("Iteration %d complete \n\n" % (x + 1))
print("Automated Relaxation finished")
HaltExecution = True
"""
check_for_errors begins by determining whether the value returned by
get_attribute_by_name is a string type. If it is, this is
the error the program will be looking for, when it calls grep.
In the event that it is a string type, the function follows by obtaining a
string of expect file names if "error" is passed to get_attribute_by_name,
then this function will return a name that will not be verbatim to
any file. This name is then passed to get_files_which_contain_string,
which returns a list of files that have the string within their filename.
From there determine_most_recent_file is called, and is passed this list.
It determines which of the file that's located within the list, is the newest
, as in has the greatest time stamp value. This is in the event that the user
decides not provide a delete_file attribute.
Then the function calls call_grep passing CheckError and the NewestFile variables.
call_grep will return a list of strings.
If the error is located within the list of strings, then the function
will call make_call_with_string, and will pass the attribute of the do_when_error
object. By default this is mv CONTCAR POSCAR
"""
## Checks for errors within the volume relaxation automation
#
# @param Auto object variable
#
def check_for_errors(obj):
CheckError = obj.get_attribute_by_name("error")
if type(CheckError) is str:
File = obj.get_attribute_by_name("error_file")
NewestFile = determine_most_recent_file(get_files_which_contain_string(File))
hold = call_grep(CheckError, NewestFile)
for x in hold:
if obj.check_for_error(x) == True:
make_call_with_string(obj.get_attribute_by_name("do_when_error"))
return True
return False
def print_lines(lines):
for x in lines:
print(x)
if __name__ == '__main__':
main()
| mit | -5,926,839,145,691,837,000 | 30.447099 | 120 | 0.735837 | false | 3.751629 | true | false | false |
tturowski/gwide | gwide/scripts/sam2deletions.py | 1 | 2149 | #!/usr/bin/env python
import sys, re
import pandas as pd
filepath = sys.argv[1]
df1_temp = pd.read_csv(filepath, sep="\t", names=['QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL'])
df2_comments = df1_temp[df1_temp['QNAME'].str.contains('^@')] #header line start with @
df3_deletions = df1_temp[~df1_temp['QNAME'].str.contains('^@') & df1_temp['CIGAR'].str.contains('D')] #selecting only reads with deletions
df4_single_dels = df3_deletions[df3_deletions['CIGAR'].str.contains('1D')] #only single deletions; will accept 11D and 21D - to be improoved
def del_position(CIGAR = str()):
'''Takes reference and data DataFrame's
CIGAR : str
CIGAR string from SAM file to be parsed
Returns
-------
position of 1st deletion in SAM read
'''
CIGAR_elements = re.findall(r'\d{1,2}[A-Z]',CIGAR)
first_left_match = re.search(r'\d{1,2}M',CIGAR).group(0)
first_del = re.search(r'\d{1,2}D',CIGAR).group(0)
#selecting elements between fist MATCH and first DELETION
range_of_elem = [no for no, elem in enumerate(CIGAR_elements) if elem == first_left_match or elem == first_del]
#adding
to_add_elem = CIGAR_elements[range_of_elem[0]:range_of_elem[1]] #taking positions of numbers to pick up range
nt_to_add = [int(re.search(r'\d{1,2}',i).group(0)) for i in to_add_elem] #extracting numbers
return sum(nt_to_add)
s1_to_first_del = df4_single_dels['CIGAR'].apply(del_position)
#preparing output file
df5_dels = df4_single_dels.copy()
df5_dels['POS'] = df5_dels['POS'].astype(int) + s1_to_first_del
df5_dels['CIGAR'] = "1D" #mandatory - samtools consider reads without CIGAR as unmapped
df5_dels['SEQ'] = "*"
df5_dels['FLAG'] = df5_dels['FLAG'].astype(int)
df5_dels['MAPQ'] = df5_dels['MAPQ'].astype(int)
df5_dels['PNEXT'] = df5_dels['PNEXT'].astype(int)
df5_dels['TLEN'] = df5_dels['TLEN'].astype(int)
#saving header
df2_comments.to_csv(filepath.replace(".sam","_DEL_ONLY.sam"), index=None, header=None, sep="\t")
#saving reads
with open(filepath.replace(".sam","_DEL_ONLY.sam"), 'a') as output_file:
df5_dels.to_csv(output_file, index=None, header=None, sep="\t") | apache-2.0 | 2,566,409,490,479,103,500 | 45.73913 | 140 | 0.672406 | false | 2.659653 | false | false | false |
abutcher/Taboot | taboot/util.py | 1 | 3800 | # -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright Β© 2009-2011, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
def resolve_types(ds, relative_to):
"""
Recursively translate string representation of a type within a
datastructure into an actual type instance.
:Parameters:
- `ds`: An arbitrary datastructure. Within `ds`, if a dict key named
`type` is encountered, the string contained there is replaced with the
actual type named.
- `relative_to`: The prefix which types are relative to; used during
import. As an example, if `relative_to`='taboot.tasks' and `ds`
contains a `type` key `command.Run`, then the type is imported as
`taboot.tasks.command.Run`.
"""
__import__(relative_to)
if isinstance(ds, list):
result = []
for item in ds:
result.append(resolve_types(item, relative_to))
return result
elif isinstance(ds, dict):
result = {}
for k, v in ds.iteritems():
if k == 'type':
tokens = v.split('.')
if len(tokens) == 1:
result[k] = getattr(sys.modules[relative_to], tokens[0])
else:
pkg = "%s.%s" % (relative_to, tokens[0])
__import__(pkg)
result[k] = getattr(sys.modules[pkg], tokens[1])
else:
result[k] = resolve_types(v, relative_to)
return result
else:
return ds
def instantiator(type_blob, relative_to, **kwargs):
"""
Instantiate a type, which is defined by a type blob in the
following format:
- If no paremeters are required for the type, then the blob
should be a single string describing the desired type
- If parameters are required, then the type blob must be a
dictionary with only one key that is a string describing
the desired type. The value associated with this key
should be dictionary which maps the parameter:value pairs
required when instantiating the type.
Returns the instantiated object.
"""
__import__(relative_to)
def str2type(s):
import sys
tokens = s.split('.')
if len(tokens) == 1:
return getattr(sys.modules[relative_to], tokens[0])
else:
pkg = "%s.%s" % (relative_to, tokens[0])
__import__(pkg)
return getattr(sys.modules[pkg], tokens[1])
if isinstance(type_blob, basestring):
instance_type = str2type(type_blob)
else:
if len(type_blob.keys()) != 1:
raise Exception("Number of keys isn't 1")
instance_type = str2type(type_blob.keys()[0])
kwargs.update(type_blob[type_blob.keys()[0]])
try:
return instance_type(**kwargs)
except TypeError, e:
import pprint
print "Unable to instantiate %s with the following arguments:"\
% instance_type
pprint.pprint(kwargs)
print "Full backtrace below\n"
raise
def log_update(msg):
sys.stderr.write(str(msg) + "\n")
sys.stderr.flush()
| gpl-3.0 | -991,396,593,952,565,100 | 33.536364 | 78 | 0.615162 | false | 4.133841 | false | false | false |
euripedesrocha/gizflo | gizflo/extintf/_sdram.py | 2 | 2364 |
from ._clock import Clock
from ._extintf import _extintf
class SDRAM(_extintf):
"""
Define the interface to SDRAMs. This includes single and
double data rate SDRAMs.
"""
# the timing atrribute captures all the time fields of the
# SDRAM controller. These are used to setup the constraints
# as well as being passed to the SDRAM controller HDL.
# All timing fields are in nano-seconds.
default_timing = {
'init': 200000.0, # min init interval
'ras': 45.0, # min interval between active precharge commands
'rcd': 20.0, # min interval between active R/W commands
'ref': 64000000.0, # max refresh interval
'rfc': 65.0, # refresh operation duration
'rp': 20.0, # min precharge command duration
'xsr': 75.0, # exit self-refresh time
}
default_ports = {
'data': None, # D0-D15, data inputs
'addr': None, # A0-A12, address inputs
'dqs': None, # data strobe
'udqs': None, # data strobe, upper byte
'ldqs': None, # data strobe, lower byte
'dqm': None, # data mask
'udqm': None, # upper data mask
'ldqm': None, # lower data mask
'bs': None, # bank select (bs == ba)
'ba': None, # bank address
'ras': None, # row refresh, normally active-low
'cas': None, # column refresh, normally active-low
'we': None, # write-enable, normally active-low
'cs': None, # chip-select, normally active-low
'cke': None, # clock enable, normally active-high
'clk': None,
'clkp': None, # ddr positive clock
'clkn': None, # ddr negative clock
}
def __init__(self, *ports, **params):
# is this DDR SDRAM or SDR SDRAM (ddr=0)
if 'ddr' in params:
self.ddr = params['ddr']
else:
self.ddr = 0
# walk through the ports and update
self._ports = dict(self.default_ports)
for pp in ports:
self._ports[pp.name] = pp
# misc parameters
self._params = dict(params)
# update any of the timing information
self._timing = dict(self.default_timing)
for k,v in params.items():
if k in self._timing:
self._timing[k] = v
| lgpl-3.0 | -4,910,137,506,969,989,000 | 32.771429 | 76 | 0.554992 | false | 3.560241 | false | false | false |
sakishinoda/tf-ssl | vat_ladder/config.py | 1 | 1480 | from argparse import Namespace
p = Namespace()
p.id = "full"
p.logdir = "train/mlpgamma/"
p.ckptdir = "train/mlpgamma/"
p.write_to = "description"
p.do_not_save = None
p.verbose = True
p.dataset = "mnist"
p.input_size = 784
p.test_frequency_in_epochs = 5
p.validation = 0
p.tb = False
p.which_gpu = 0
p.seed = 8340
p.end_epoch = 150
p.num_labeled = 100
p.batch_size = 100
p.ul_batch_size = 100
p.initial_learning_rate = 0.002
p.decay_start = 0.67
p.lr_decay_frequency = 5
p.beta1 = 0.9
p.beta1_during_decay = 0.9
p.encoder_layers = "1000-500-250-250-250-10"
p.corrupt_sd = 0.2
p.rc_weights = "0-0-0-0-0-0-10"
p.static_bn = 0.99
p.lrelu_a = 0.1
p.top_bn = False
p.epsilon = "5.0" # must be
p.num_power_iters = 3
p.xi = 1e-6
p.vadv_sd = 0.5
p.model = "gamma"
p.measure_smoothness = False
p.measure_vat = False
p.cnn = False
p.cnn_layer_types = \
"c-c-c-max-c-c-c-max-c-c-c-avg-fc"
p.cnn_fan = \
"3-96-96-96-96-192-192-192-192-192-192-192-192-10"
p.cnn_ksizes = \
"3-3-3-3-3-3-3-3-3-1-1-0-0"
p.cnn_strides = \
"1-1-1-2-1-1-1-2-1-1-1-0-0"
p.cnn_dims = \
"32-32-32-32-16-16-16-16-8-8-8-8-1"
| mit | 2,086,109,220,761,961,700 | 24.084746 | 54 | 0.477027 | false | 2.266462 | false | true | false |
jinzishuai/learn2deeplearn | deeplearning.ai/C4.CNN/week1_foundations/hw/hw1a.py | 1 | 38303 | #!/usr/bin/python3
# coding: utf-8
# # Convolutional Neural Networks: Step by Step
#
# Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation.
#
# **Notation**:
# - Superscript $[l]$ denotes an object of the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
#
# - Superscript $(i)$ denotes an object from the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
#
# - Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.
#
#
# - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$.
# - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# ## 1 - Packages
#
# Let's first import all the packages that you will need during this assignment.
# - [numpy](https://www.numpy.org) is the fundamental package for scientific computing with Python.
# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
# In[14]:
import numpy as np
import h5py
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
#get_ipython().magic('load_ext autoreload')
#get_ipython().magic('autoreload 2')
np.random.seed(1)
# ## 2 - Outline of the Assignment
#
# You will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:
#
# - Convolution functions, including:
# - Zero Padding
# - Convolve window
# - Convolution forward
# - Convolution backward (optional)
# - Pooling functions, including:
# - Pooling forward
# - Create mask
# - Distribute value
# - Pooling backward (optional)
#
# This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:
#
# <img src="images/model.png" style="width:800px;height:300px;">
#
# **Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation.
# ## 3 - Convolutional Neural Networks
#
# Although programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below.
#
# <img src="images/conv_nn.png" style="width:350px;height:200px;">
#
# In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself.
# ### 3.1 - Zero-Padding
#
# Zero-padding adds zeros around the border of an image:
#
# <img src="images/PAD.png" style="width:600px;height:400px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>
#
# The main benefits of padding are the following:
#
# - It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer.
#
# - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels at the edges of an image.
#
# **Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:
# ```python
# a = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))
# ```
# In[15]:
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
    ### START CODE HERE ### (≈ 1 line)
    X_pad = np.pad(X, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_values=0)
### END CODE HERE ###
return X_pad
# In[16]:
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1,1] =", x[1,1])
print ("x_pad[1,1] =", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **x.shape**:
# </td>
# <td>
# (4, 3, 3, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x_pad.shape**:
# </td>
# <td>
# (4, 7, 7, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x[1,1]**:
# </td>
# <td>
# [[ 0.90085595 -0.68372786]
# [-0.12289023 -0.93576943]
# [-0.26788808 0.53035547]]
# </td>
# </tr>
# <tr>
# <td>
# **x_pad[1,1]**:
# </td>
# <td>
# [[ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]]
# </td>
# </tr>
#
# </table>
# ### 3.2 - Single step of convolution
#
# In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which:
#
# - Takes an input volume
# - Applies a filter at every position of the input
# - Outputs another volume (usually of different size)
#
# <img src="images/Convolution_schematic.gif" style="width:500px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>
#
# In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output.
#
# Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation.
#
# **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).
#
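# As a quick illustration (toy numbers, not part of the graded exercise), a single step on a 2x2, one-channel slice looks like this:
#
# ```python
# import numpy as np
# a_slice = np.array([[1., 3.], [4., 2.]]).reshape(2, 2, 1)  # one window of the input
# W = np.array([[1., 0.], [0., 1.]]).reshape(2, 2, 1)        # one filter
# b = np.array([0.5]).reshape(1, 1, 1)                       # its bias
# Z = np.sum(a_slice * W + b)  # (1 + 0 + 0 + 2) + 4 * 0.5 = 5.0
# ```
#
# With this convention, `b` is broadcast into every entry of the window before the sum; the function below follows the same convention.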
# In[17]:
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
    ### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Add bias.
s = a_slice_prev * W + b
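    # Note: b has shape (1, 1, 1) and broadcasts over the whole (f, f, n_C_prev)
    # window, so it contributes to every entry of s before the sum below.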
# Sum over all entries of the volume s
Z = np.sum(s)
### END CODE HERE ###
return Z
# In[18]:
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
# **Expected Output**:
# <table>
# <tr>
# <td>
# **Z**
# </td>
# <td>
# -23.1602122025
# </td>
# </tr>
#
# </table>
# ### 3.3 - Convolutional Neural Networks - Forward pass
#
# In the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume:
#
# <center>
# <video width="620" height="440" src="images/conv_kiank.mp4" type="video/mp4" controls>
# </video>
# </center>
#
# **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding.
#
# **Hint**:
# 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:
# ```python
# a_slice_prev = a_prev[0:2,0:2,:]
# ```
# This will be useful when you define `a_slice_prev` below, using the `start/end` indexes you will define.
# 2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.
#
# <img src="images/vert_horiz_kiank.png" style="width:400px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>
#
#
# **Reminder**:
# The formulas relating the output shape of the convolution to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_C = \text{number of filters used in the convolution}$$
#
# For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.
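#
# As a quick sanity check of these formulas (the only inputs assumed here are the ones from the test cell below: 4x4 inputs, f = 2, pad = 2, stride = 1):
#
# ```python
# n_H_prev, f, pad, stride = 4, 2, 2, 1
# n_H = int((n_H_prev - f + 2 * pad) / stride) + 1  # (4 - 2 + 4) / 1 + 1 = 7
# ```
#
# so `Z` should come out with spatial size 7x7, i.e. shape (10, 7, 7, 8) for the test below.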
# In[19]:
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
"""
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
"""
### START CODE HERE ###
    # Retrieve dimensions from A_prev's shape (≈1 line)
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # Retrieve dimensions from W's shape (≈1 line)
    (f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters" (≈2 lines)
    stride = hparameters["stride"]
    pad = hparameters["pad"]
    # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)
    n_H = int(np.floor((n_H_prev - f + 2*pad)/stride + 1))
    n_W = int(np.floor((n_W_prev - f + 2*pad)/stride + 1))
    # Initialize the output volume Z with zeros. (≈1 line)
Z = np.zeros((m, n_H, n_W, n_C))
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i,:,:,:] # Select ith training example's padded activation
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over channels (= #filters) of the output volume
                    # Find the corners of the current "slice" (≈4 lines)
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
                    a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[:, :, :, c])
### END CODE HERE ###
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
# In[20]:
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
"stride": 1}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Z's mean**
# </td>
# <td>
# 0.155859324889
# </td>
# </tr>
# <tr>
# <td>
# **cache_conv[0][1][2][3]**
# </td>
# <td>
# [-0.20075807 0.18656139 0.41005165]
# </td>
# </tr>
#
# </table>
#
# Finally, CONV layer should also contain an activation, in which case we would add the following line of code:
#
# ```python
# # Convolve the window to get back one output neuron
# Z[i, h, w, c] = ...
# # Apply activation
# A[i, h, w, c] = activation(Z[i, h, w, c])
# ```
#
# You don't need to do it here.
#
# ## 4 - Pooling layer
#
# The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are:
#
# - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.
#
# - Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.
#
# <table>
# <td>
# <img src="images/max_pool1.png" style="width:500px;height:300px;">
# <td>
#
# <td>
# <img src="images/a_pool.png" style="width:500px;height:300px;">
# <td>
# </table>
#
# These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the $f \times f$ window you would compute a max or average over.
#
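# As a tiny illustration (toy numbers) of what each mode computes on a single 2x2 window:
#
# ```python
# import numpy as np
# window = np.array([[1., 3.],
#                    [4., 2.]])
# np.max(window)   # max-pooling output for this window: 4.0
# np.mean(window)  # average-pooling output for this window: 2.5
# ```
#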
# ### 4.1 - Forward Pooling
# Now, you are going to implement MAX-POOL and AVG-POOL, in the same function.
#
# **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.
#
# **Reminder**:
# As there's no padding, the formulas binding the output shape of the pooling to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_C = n_{C_{prev}}$$
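#
# Applied to the test cell below (4x4 inputs, f = 4, stride = 1), these formulas give a single pooled value per channel:
#
# ```python
# n_H_prev, f, stride = 4, 4, 1
# n_H = int(1 + (n_H_prev - f) / stride)  # = 1
# ```
#
# so `A` should have shape (2, 1, 1, 3) in both modes, as the expected outputs below show.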
# In[21]:
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
### START CODE HERE ###
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
for w in range(n_W): # loop on the horizontal axis of the output volume
for c in range (n_C): # loop over the channels of the output volume
                    # Find the corners of the current "slice" (≈4 lines)
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
                    a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. Use np.max/np.mean.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.average(a_prev_slice)
### END CODE HERE ###
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
# In[22]:
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 1, "f": 4}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
# **Expected Output:**
# <table>
#
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[ 1.74481176 1.6924546 2.10025514]]] <br/>
#
#
# [[[ 1.19891788 1.51981682 2.18557541]]]]
#
# </td>
# </tr>
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[-0.09498456 0.11180064 -0.14263511]]] <br/>
#
#
# [[[-0.09525108 0.28325018 0.33035185]]]]
#
# </td>
# </tr>
#
# </table>
#
# Congratulations! You have now implemented the forward passes of all the layers of a convolutional network.
#
# The remainder of this notebook is optional, and will not be graded.
#
# ## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)
#
# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like.
#
# When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.
#
# ### 5.1 - Convolutional layer backward pass
#
# Let's start by implementing the backward pass for a CONV layer.
#
# #### 5.1.1 - Computing dA:
# This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:
#
# $$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$
#
# Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
# ```
#
# #### 5.1.2 - Computing dW:
# This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:
#
# $$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$
#
# Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
# ```
#
# #### 5.1.3 - Computing db:
#
# This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:
#
# $$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$
#
# As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# db[:,:,:,c] += dZ[i, h, w, c]
# ```
#
# **Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
# In[36]:
def conv_backward(dZ, cache):
"""
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
"""
### START CODE HERE ###
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = cache
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters["stride"]
pad = hparameters["pad"]
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = dZ.shape
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
dW = np.zeros((f, f, n_C_prev, n_C))
db = np.zeros((1, 1, 1, n_C))
# Pad A_prev and dA_prev
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = A_prev_pad[i,:,:,:]
da_prev_pad = dA_prev_pad[i,:,:,:]
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = h*stride
vert_end = vert_start + f
horiz_start = w*stride
horiz_end = horiz_start +f
# Use the corners to define the slice from a_prev_pad
a_slice = a_prev_pad[vert_start:vert_end,horiz_start:horiz_end,: ]
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
        dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
### END CODE HERE ###
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
# In[37]:
np.random.seed(1)
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
# **Expected Output:**
# <table>
# <tr>
# <td>
# **dA_mean**
# </td>
# <td>
# 9.60899067587
# </td>
# </tr>
# <tr>
# <td>
# **dW_mean**
# </td>
# <td>
# 10.5817412755
# </td>
# </tr>
# <tr>
# <td>
# **db_mean**
# </td>
# <td>
# 76.3710691956
# </td>
# </tr>
#
# </table>
#
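# As an optional sanity check (an illustrative addition, not part of the original assignment), you can compare `conv_backward` against a numerical gradient on a tiny input. The sketch below assumes `conv_forward` from earlier in this notebook is in scope, and uses the sum of $Z$ as the cost so that $dZ$ is a matrix of ones:
# ```python
# eps = 1e-5
# A_s = np.random.randn(1, 4, 4, 1)
# W_s = np.random.randn(2, 2, 1, 1)
# b_s = np.random.randn(1, 1, 1, 1)
# hp = {"stride": 1, "pad": 1}
# Z_s, cache_s = conv_forward(A_s, W_s, b_s, hp)
# dA_s, dW_s, db_s = conv_backward(np.ones(Z_s.shape), cache_s)
# # numerical derivative of sum(Z) with respect to one input entry
# A_pert = A_s.copy()
# A_pert[0, 1, 1, 0] += eps
# num = (conv_forward(A_pert, W_s, b_s, hp)[0].sum() - Z_s.sum()) / eps
# print(num, dA_s[0, 1, 1, 0])  # the two values should agree closely
# ```
#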
# ## 5.2 Pooling layer - backward pass
#
# Next, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer.
#
# ### 5.2.1 Max pooling - backward pass
#
# Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following:
#
# $$ X = \begin{bmatrix}
# 1 & 3 \\
# 4 & 2
# \end{bmatrix} \quad \rightarrow \quad M = \begin{bmatrix}
# 0 & 0 \\
# 1 & 0
# \end{bmatrix}\tag{4}$$
#
# As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X; the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask.
#
# **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward.
# Hints:
# - `np.max()` may be helpful. It computes the maximum of an array.
# - If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:
# ```
# A[i,j] = True if X[i,j] = x
# A[i,j] = False if X[i,j] != x
# ```
# - Here, you don't need to consider cases where there are several maxima in a matrix.
# In[38]:
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
    ### START CODE HERE ### (≈ 1 line)
    mask = (x == np.max(x))
### END CODE HERE ###
return mask
# In[39]:
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
# **Expected Output:**
#
# <table>
# <tr>
# <td>
#
# **x =**
# </td>
#
# <td>
#
# [[ 1.62434536 -0.61175641 -0.52817175] <br>
# [-1.07296862 0.86540763 -2.3015387 ]]
#
# </td>
# </tr>
#
# <tr>
# <td>
# **mask =**
# </td>
# <td>
# [[ True False False] <br>
# [False False False]]
# </td>
# </tr>
#
#
# </table>
# Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost.
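#
# As a quick illustration (an added example, not part of the original exercise), the mask simply routes an incoming gradient value to the position of the max and zeroes out the rest of the window:
# ```python
# window = np.array([[1., 3.],
#                    [4., 2.]])
# print(create_mask_from_window(window) * 5.0)
# # [[ 0.  0.]
# #  [ 5.  0.]]
# ```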
# ### 5.2.2 - Average pooling - backward pass
#
# In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.
#
# For example, if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like:
# $$ dZ = 1 \quad \rightarrow \quad dZ = \begin{bmatrix}
# 1/4 & 1/4 \\
# 1/4 & 1/4
# \end{bmatrix}\tag{5}$$
#
# This implies that each position in the $dZ$ matrix contributes equally to the output because in the forward pass, we took an average.
#
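# To see where the $1/4$ comes from (a short derivation added for clarity): for a 2x2 window the forward pass computed $a = \frac{1}{4}(x_1 + x_2 + x_3 + x_4)$, so $\frac{\partial a}{\partial x_i} = \frac{1}{4}$ for every $i$, and by the chain rule each input entry of the window receives $dz \cdot \frac{1}{4}$.
#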
# **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
# In[42]:
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
    ### START CODE HERE ###
    # Retrieve dimensions from shape (≈ 1 line)
    (n_H, n_W) = shape
    # Compute the value to distribute on the matrix (≈ 1 line)
    average = dz / (n_H * n_W)
    # Create a matrix where every entry is the "average" value (≈ 1 line)
    a = np.ones(shape) * average
### END CODE HERE ###
return a
# In[43]:
a = distribute_value(2, (2,2))
print('distributed value =', a)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# distributed_value =
# </td>
# <td>
# [[ 0.5 0.5]
# <br>
# [ 0.5 0.5]]
# </td>
# </tr>
# </table>
# ### 5.2.3 Putting it together: Pooling backward
#
# You now have everything you need to compute backward propagation on a pooling layer.
#
# **Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.
# In[48]:
def pool_backward(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
### START CODE HERE ###
    # Retrieve information from cache (≈ 1 line)
(A_prev, hparameters) = cache
    # Retrieve hyperparameters from "hparameters" (≈ 2 lines)
stride = hparameters["stride"]
f = hparameters["f"]
    # Retrieve dimensions from A_prev's shape and dA's shape (≈ 2 lines)
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
    # Initialize dA_prev with zeros (≈ 1 line)
dA_prev = np.zeros(A_prev.shape)
for i in range(m): # loop over the training examples
        # select training example from A_prev (≈ 1 line)
a_prev = A_prev[i,:,:,:]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
                    # Find the corners of the current "slice" (≈ 4 lines)
vert_start = h*stride
vert_end = vert_start + f
horiz_start = w*stride
horiz_end = horiz_start + f
# Compute the backward propagation in both modes.
if mode == "max":
                        # Use the corners and "c" to define the current slice from a_prev (≈ 1 line)
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
                        # Create the mask from a_prev_slice (≈ 1 line)
mask = create_mask_from_window(a_prev_slice)
                        # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈ 1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += mask*dA[i, h, w, c]
                    elif mode == "average":
                        # Get the value da from dA (≈ 1 line)
                        da = dA[i, h, w, c]
                        # Define the shape of the filter as fxf (≈ 1 line)
                        shape = (f, f)
                        # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈ 1 line)
                        dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)
### END CODE ###
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
# In[49]:
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
# **Expected Output**:
#
# mode = max:
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0. 0. ] <br>
# [ 5.05844394 -1.68282702] <br>
# [ 0. 0. ]]
# </td>
# </tr>
# </table>
#
# mode = average
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0.08485462 0.2787552 ] <br>
# [ 1.26461098 -0.25749373] <br>
# [ 1.17975636 -0.53624893]]
# </td>
# </tr>
# </table>
# ### Congratulations!
#
# Congratulations on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.
| gpl-3.0 | 8,197,801,481,799,372,000 | 36.275828 | 590 | 0.60285 | false | 3.186021 | false | false | false |
xz/cyber-security | api/run.py | 10 | 1269 | #!/usr/bin/python3
"""
picoCTF API Startup script
"""
import api
from argparse import ArgumentParser
from api.app import app
def main():
"""
Runtime management of the picoCTF API
"""
parser = ArgumentParser(description="picoCTF API configuration")
parser.add_argument("-v", "--verbose", action="count", help="increase verbosity", default=0)
parser.add_argument("-p", "--port", action="store", help="port the server should listen on.", type=int, default=8000)
parser.add_argument("-l", "--listen", action="store", help="host the server should listen on.", default="0.0.0.0")
parser.add_argument("-d", "--debug", action="store_true", help="run the server in debug mode.", default=False)
args = parser.parse_args()
keyword_args, _ = object_from_args(args)
api.app.config_app().run(host=args.listen, port=args.port, debug=args.debug)
def object_from_args(args):
"""
    Turns argparse's namespace into something manageable by an external library.
    Args:
        args: The result from parser.parse_args()
Returns:
A tuple of a dict representing the kwargs and a list of the positional arguments.
"""
return dict(args._get_kwargs()), args._get_args() # pylint: disable=protected-access
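# Example invocation (an illustrative note; the flags are defined in main() above):
#   python3 run.py --listen 127.0.0.1 --port 8000 --debug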
if __name__ == "__main__":
    main()
| mit | 3,113,173,394,285,297,000 | 29.214286 | 121 | 0.675335 | false | 3.721408 | false | false | false |
astraw/mplsizer | demo/demo_gridsizer2.py | 2 | 1658 | import pylab
import numpy
# Demonstration of MplGridSizer use.
def labelax(ax,label):
ax.text(0.5,0.5,label,
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes,
)
fig = pylab.figure(figsize=(8,6))
# Axes placement doesn't matter, but to make sure matplotlib doesn't
# simply return a previous Axes instance with the same bounding box,
# assign a different label to each Axes instance.
import mpl_toolkits.mplsizer as mplsizer
frame = mplsizer.MplSizerFrame( fig )
sizer = mplsizer.MplBoxSizer()#orientation='horizontal')
frame.SetSizer(sizer)#,expand=1)
x = numpy.linspace(0,2*numpy.pi,100)
y = numpy.sin(1*x+numpy.pi/2) + .5*numpy.sin(3*x)
cols = 3
rows = 4
hsizer = mplsizer.MplGridSizer(cols=cols)#,vgap_inch=0.1)
for r in range(rows):
for c in range(cols):
if r==1 and c==1:
# This is how to add an empty element.
ax = mplsizer.MplSizerElement()
else:
# The unique labels are required to generate separate Axes instances.
ax = fig.add_axes([0,0,1,1],label='row %d col %d'%(r,c))
ax.plot(x,y)
labelax(ax,'%d,%d'%(r,c))
if not (r==2 and c==2):
# Draw tick labels on one Axes instance.
pylab.setp(ax,'xticks',[])
pylab.setp(ax,'yticks',[])
# The "border" value below was hand-tuned to not overlap.
hsizer.Add(ax,name='row %d, col %d'%(r,c),all=1,border=0.3,expand=1)
sizer.Add(hsizer,all=1,bottom=1,border=0.25,expand=1,option=1)
frame.Layout() # Trigger the layout within mplsizer.
pylab.show()
| mit | -2,350,235,128,957,835,300 | 29.703704 | 81 | 0.623643 | false | 3.128302 | false | false | false |
swegener/gruvi | tests/perf_dbus.py | 2 | 1878 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function, division
import time
import unittest
from gruvi.dbus import DbusClient, DbusServer
from support import PerformanceTest
from test_dbus import echo_app
class PerfDBus(PerformanceTest):
def perf_message_throughput_pipe(self):
# Test roundtrips of a simple method call over a Pipe
server = DbusServer(echo_app)
addr = 'unix:path=' + self.pipename()
server.listen(addr)
client = DbusClient()
client.connect(addr)
nmessages = 0
t0 = t1 = time.time()
while t1 - t0 < 0.2:
client.call_method('bus.name', '/path', 'my.iface', 'Echo')
t1 = time.time()
nmessages += 1
throughput = nmessages / (t1 - t0)
self.add_result(throughput)
server.close()
client.close()
def perf_message_throughput_tcp(self):
# Test roundtrips of a simple method call over TCP.
server = DbusServer(echo_app)
addr = 'tcp:host=127.0.0.1,port=0'
server.listen(addr)
client = DbusClient()
client.connect(server.addresses[0])
nmessages = 0
t0 = t1 = time.time()
while t1 - t0 < 0.2:
client.call_method('bus.name', '/path', 'my.iface', 'Echo')
t1 = time.time()
nmessages += 1
throughput = nmessages / (t1 - t0)
self.add_result(throughput)
server.close()
client.close()
if __name__ == '__main__':
unittest.defaultTestLoader.testMethodPrefix = 'perf'
unittest.main()
| mit | 1,150,178,752,588,408,600 | 30.3 | 73 | 0.613419 | false | 3.590822 | true | false | false |
ScreamingUdder/mantid | qt/python/mantidqt/widgets/algorithmselector/model.py | 1 | 2905 | from __future__ import absolute_import, print_function
from mantid import AlgorithmFactory
class AlgorithmSelectorModel(object):
"""
This is a model for the algorithm selector widget.
"""
algorithm_key = '_'
def __init__(self, presenter, include_hidden=False):
"""
Initialise a new instance of AlgorithmSelectorModel
:param presenter: A presenter controlling this model.
:param include_hidden: If True the widget must include all hidden algorithms
"""
self.presenter = presenter
self.include_hidden = include_hidden
def get_algorithm_data(self):
"""
Prepare the algorithm description data for displaying in the view.
:return: A tuple of two elements. The first is a list of all algorithm names.
            The second is a tree of nested dicts where keys are either category names
or self.algorithm_key. Values of the category names are sub-trees
and those of self.algorithm_key have dicts mapping algorithm names
to lists of their versions. For example (as yaml):
Arithmetic:
Errors:
_:
PoissonErrors: [1]
SetUncertainties: [1]
SignalOverError: [1]
FFT:
_:
ExtractFFTSpectrum: [1]
FFT: [1]
FFTDerivative: [1]
FFTSmooth: [1, 2]
_:
CrossCorrelate: [1]
Divide: [1]
Exponential: [1]
GeneralisedSecondDifference: [1]
Here self.algorithm_key == '_'
"""
descriptors = AlgorithmFactory.getDescriptors(self.include_hidden)
algorithm_names = []
data = {}
for descriptor in descriptors:
# descriptor is a class with data fields: name, alias, category, version
if descriptor.name not in algorithm_names:
algorithm_names.append(descriptor.name)
categories = descriptor.category.split('\\')
# Create nested dictionaries in which the key is a category and the value
# is a similar dictionary with sub-categories as keys
d = data
for cat in categories:
if cat not in d:
d[cat] = {}
d = d[cat]
            # The entry with key == self.algorithm_key ('_') contains a dict with algorithm names as keys
# The values are lists of version numbers
if self.algorithm_key not in d:
d[self.algorithm_key] = {}
d = d[self.algorithm_key]
if descriptor.name not in d:
d[descriptor.name] = []
d[descriptor.name].append(descriptor.version)
return algorithm_names, data
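# A minimal usage sketch (an illustrative addition, not part of the widget; it
# requires a Mantid installation so that AlgorithmFactory is available): walk
# the tree returned by get_algorithm_data and print it as an indented outline.
if __name__ == '__main__':
    def _print_tree(node, indent=0):
        for key in sorted(node):
            if key == AlgorithmSelectorModel.algorithm_key:
                # leaf dict: algorithm name -> list of version numbers
                for name, versions in sorted(node[key].items()):
                    print(' ' * indent + '%s %s' % (name, versions))
            else:
                print(' ' * indent + key)
                _print_tree(node[key], indent + 2)
    _, tree = AlgorithmSelectorModel(presenter=None).get_algorithm_data()
    _print_tree(tree)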
| gpl-3.0 | -8,020,683,523,555,640,000 | 38.256757 | 85 | 0.546299 | false | 5.017271 | false | false | false |
bauerca/BikeShareDemand_Carl | lr.py | 1 | 4253 | import numpy
import pandas
import pylab
import datetime
import sys
from sklearn import linear_model
date_format = '%Y-%m-%d %H:%M:%S'
train = numpy.genfromtxt('train.csv', delimiter=',')
data = pandas.read_csv('train.csv')
dates = [datetime.datetime.strptime(s, date_format) for s in data['datetime']]
hours = [date.hour for date in dates]
days = [date.day for date in dates]
data['hour'] = hours
data['day'] = days
pylab.plot_date(dates, data['count'])
pylab.plot_date(dates, data['registered'])
#pylab.plot_date(dates, data['atemp'])
#pylab.show()
# There are vivid peaks at morning and evening rush hour in the later
# months. Afternoon rush tends to be more spread out, people leaving work
# varying times, but everyone going in together?
# Counts oscillate with a day period, amplitude grows as the service
# becomes popular.
# Registered users seem to match total count more often on workdays.
#
# Model function:
#
# count = pop(b1, ..., bk) * ( sin(T = day) + exp(sigma = 1hr) + exp(
def bump(hour, center, half_width):
offset = hour - center
if abs(offset) > half_width:
return 0.0
else:
return 0.5 * numpy.cos(numpy.pi * offset / half_width) + 0.5
MORNING_HALF_WIDTH = 1.0 #hours
MORNING_RUSH_HOUR = 8.0 #AM
def morning_bump_fn(hour):
    return bump(hour, MORNING_RUSH_HOUR, MORNING_HALF_WIDTH)
def evening_bump_fn(hour):
return bump(hour, 16.0, 2.0)
# The popularity will be the average of the previous day's counts
work_popularity = 0.0
work_counts = 0
holiday_popularity = 0.0
holiday_counts = 0
def workday_predictor(row, popularity):
"""
The predictor
"""
date = datetime.datetime.strptime(row['datetime'], date_format)
return [
popularity * (1.0 - numpy.cos(2.0 * numpy.pi * date.hour / 24.0)),
        popularity * morning_bump_fn(date.hour),
        popularity * evening_bump_fn(date.hour),
row['atemp'],
row['humidity']
]
def holiday_predictor(row, popularity):
"""
Arguments:
- row: A data sample.
- popularity (float): Scales the bump features. Should reflect the day's
popularity measure.
"""
date = datetime.datetime.strptime(row['datetime'], date_format)
return [
popularity * (1.0 - numpy.cos(2.0 * numpy.pi * date.hour / 24.0)),
0.0,
0.0,
row['atemp'],
row['humidity']
]
morning_bump = pandas.Series(range(0, 24), dtype='float').map(morning_bump_fn)
evening_bump = pandas.Series(range(0, 24), dtype='float').map(evening_bump_fn)
day_bump = pandas.Series(range(0, 24), dtype='float').map(lambda hour: 1.0 - numpy.cos(2.0 * numpy.pi * hour / 24.0))
# Gather training data.
day = 0
date = None
hour = 0
predictors = None #pandas.DataFrame(columns=['day_bump', 'morning_bump', 'evening_bump'])
i = 0
while i < len(data):
day_start = i
current_day = data.loc[i, 'day']
# Get day slice
while i < len(data) and data.loc[i, 'day'] == current_day:
i += 1
day_end = i
dayta = data[day_start:day_end]
#print 'Day', day, 'data'
#print dayta
#print data.loc[day_start].datetime, 'to', data.loc[day_end].datetime
popularity = dayta['count'].mean()
day_predictors = pandas.DataFrame()
day_predictors['day_bump'] = dayta['hour'].map(lambda hour: popularity * (1.0 - numpy.cos(2.0 * numpy.pi * hour / 24.0)))
if dayta.loc[day_start, 'workingday'] == 1:
day_predictors['morning_bump'] = dayta['hour'].map(lambda hour: popularity * morning_bump_fn(hour))
day_predictors['evening_bump'] = dayta['hour'].map(lambda hour: popularity * evening_bump_fn(hour))
else:
day_predictors['morning_bump'] = numpy.zeros(len(dayta))
day_predictors['evening_bump'] = numpy.zeros(len(dayta))
day_predictors.index = dayta.index
#print day_predictors
if predictors is None:
predictors = day_predictors
else:
predictors = predictors.append(day_predictors)
day += 1
#print predictors
#pylab.plot_date(dates, predictors['day_bump'])
#pylab.plot_date(dates, predictors['morning_bump'])
#pylab.plot_date(dates, predictors['evening_bump'])
lr = linear_model.LinearRegression()
lr.fit(predictors.values, data[['registered', 'count']])
res = lr.predict(predictors.values)
print(res.shape)
pylab.plot_date(dates, res[:,0])
pylab.plot_date(dates, res[:,1])
pylab.show()
#numpy.linalg.svd()
| gpl-2.0 | -8,964,259,412,971,205,000 | 24.775758 | 123 | 0.675288 | false | 2.937155 | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/crop_position_mask.py | 2 | 6940 | # Crop position mask
import cv2
import numpy as np
import math
import os
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
def crop_position_mask(img, mask, x, y, v_pos="top", h_pos="right"):
"""Crop position mask
Inputs:
img = RGB or grayscale image data for plotting
mask = Binary mask to use (must be correct size, if, not use make_resize_mask function)
x = x position
y = y position
v_pos = push from "top" or "bottom"
h_pos = push to "right" or "left"
Returns:
newmask = image mask
:param img: numpy.ndarray
:param mask: numpy.ndarray
:param x: int
:param y: int
:param v_pos: str
:param h_pos: str
:return newmask: numpy.ndarray
"""
params.device += 1
if x < 0 or y < 0:
fatal_error("x and y cannot be negative numbers or non-integers")
# get the sizes of the images
# subtract 1 from x and y since python counts start from 0
if y != 0:
y = y - 1
if x != 0:
x = x - 1
if len(np.shape(img)) == 3:
ix, iy, iz = np.shape(img)
ori_img = np.copy(img)
else:
ix, iy = np.shape(img)
ori_img = np.dstack((img, img, img))
if len(np.shape(mask)) == 3:
mx, my, mz = np.shape(mask)
mask = mask[0]
else:
mx, my = np.shape(mask)
# resize the images so they are equal in size and centered
if mx >= ix:
r = mx - ix
if r % 2 == 0:
r1 = int(np.rint(r / 2.0))
r2 = r1
else:
r1 = int(np.rint(r / 2.0))
r2 = r1 - 1
mask = mask[r1:mx - r2, 0:my]
if my >= iy:
r = my - iy
if r % 2 == 0:
r1 = int(np.rint(r / 2.0))
r2 = r1
else:
r1 = int(np.rint(r / 2.0))
r2 = r1 - 1
mask = mask[0:mx, r1:my - r2]
# New mask shape
mx, my = np.shape(mask)
if v_pos.upper() == "TOP":
# Add rows to the top
top = np.zeros((x, my), dtype=np.uint8)
maskv = np.vstack((top, mask))
mx, my = np.shape(maskv)
if mx >= ix:
maskv = maskv[0:ix, 0:my]
if mx < ix:
r = ix - mx
if r % 2 == 0:
r1 = int(r / 2.0)
rows1 = np.zeros((r1, my), dtype=np.uint8)
maskv = np.vstack((rows1, maskv, rows1))
else:
r1 = int(math.ceil(r / 2.0))
r2 = r1 - 1
rows1 = np.zeros((r1, my), dtype=np.uint8)
rows2 = np.zeros((r2, my), dtype=np.uint8)
maskv = np.vstack((rows1, maskv, rows2))
if params.debug == 'print':
print_image(maskv, os.path.join(params.debug_outdir, str(params.device) + "_push-top.png"))
elif params.debug == 'plot':
plot_image(maskv, cmap='gray')
elif v_pos.upper() == "BOTTOM":
# Add rows to the bottom
bottom = np.zeros((x, my), dtype=np.uint8)
maskv = np.vstack((mask, bottom))
mx, my = np.shape(maskv)
if mx >= ix:
maskdiff = mx - ix
maskv = maskv[maskdiff:mx, 0:my]
if mx < ix:
r = ix - mx
if r % 2 == 0:
r1 = int(r / 2.0)
rows1 = np.zeros((r1, my), dtype=np.uint8)
maskv = np.vstack((rows1, maskv, rows1))
else:
r1 = int(math.ceil(r / 2.0))
r2 = r1 - 1
rows1 = np.zeros((r1, my), dtype=np.uint8)
rows2 = np.zeros((r2, my), dtype=np.uint8)
maskv = np.vstack((rows1, maskv, rows2))
if params.debug == 'print':
print_image(maskv, os.path.join(params.debug_outdir, str(params.device) + "_push-bottom.png"))
elif params.debug == 'plot':
plot_image(maskv, cmap='gray')
else:
fatal_error(str(v_pos) + ' is not valid, must be "top" or "bottom"!')
if h_pos.upper() == "LEFT":
mx, my = np.shape(maskv)
# Add rows to the left
left = np.zeros((mx, y), dtype=np.uint8)
maskv = np.hstack((left, maskv))
mx, my = np.shape(maskv)
if my >= iy:
maskv = maskv[0:mx, 0:iy]
if my < iy:
c = iy - my
if c % 2 == 0:
c1 = int(c / 2.0)
col = np.zeros((mx, c1), dtype=np.uint8)
maskv = np.hstack((col, maskv, col))
else:
c1 = int(math.ceil(c / 2.0))
c2 = c1 - 1
col1 = np.zeros((mx, c1), dtype=np.uint8)
col2 = np.zeros((mx, c2), dtype=np.uint8)
maskv = np.hstack((col1, maskv, col2))
if params.debug == 'print':
print_image(maskv, os.path.join(params.debug_outdir, str(params.device) + "_push-left.png"))
elif params.debug == 'plot':
plot_image(maskv, cmap='gray')
elif h_pos.upper() == "RIGHT":
mx, my = np.shape(maskv)
# Add rows to the left
right = np.zeros((mx, y), dtype=np.uint8)
maskv = np.hstack((maskv, right))
mx, my = np.shape(maskv)
if my >= iy:
ex = my - iy
maskv = maskv[0:mx, ex:my]
if my < iy:
c = iy - my
if c % 2 == 0:
c1 = int(c / 2.0)
col = np.zeros((mx, c1), dtype=np.uint8)
maskv = np.hstack((col, maskv, col))
else:
c1 = int(math.ceil(c / 2.0))
c2 = c1 - 1
col1 = np.zeros((mx, c1), dtype=np.uint8)
col2 = np.zeros((mx, c2), dtype=np.uint8)
maskv = np.hstack((col1, maskv, col2))
if params.debug == 'print':
print_image(maskv, os.path.join(params.debug_outdir, str(params.device) + "_push-right.png"))
elif params.debug == 'plot':
plot_image(maskv, cmap='gray')
else:
fatal_error(str(h_pos) + ' is not valid, must be "left" or "right"!')
newmask = np.array(maskv)
if params.debug is not None:
if params.debug == 'print':
print_image(newmask, os.path.join(params.debug_outdir, str(params.device) + "_newmask.png"))
elif params.debug == 'plot':
plot_image(newmask, cmap='gray')
objects, hierarchy = cv2.findContours(np.copy(newmask), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
for i, cnt in enumerate(objects):
cv2.drawContours(ori_img, objects, i, (255, 102, 255), -1, lineType=8, hierarchy=hierarchy)
if params.debug == 'print':
print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_mask_overlay.png'))
elif params.debug == 'plot':
plot_image(ori_img)
return newmask
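# A minimal usage sketch with synthetic arrays (an illustrative addition, not
# part of the original module): push an 80x80 mask into a 100x100 frame from
# the top and toward the right.
if __name__ == '__main__':
    params.debug = None  # disable debug output for this sketch
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    mask = np.zeros((80, 80), dtype=np.uint8)
    mask[20:60, 20:60] = 255
    newmask = crop_position_mask(img, mask, x=10, y=5, v_pos="top", h_pos="right")
    print(newmask.shape)  # (100, 100): the mask now matches the image size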
| mit | -4,923,069,766,332,989,000 | 30.261261 | 109 | 0.493804 | false | 3.177656 | false | false | false |
informatics-isi-edu/volspy | volspy/data.py | 1 | 6008 |
#
# Copyright 2014-2017 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
"""Volume data support.
Volume data is loaded as an ND-array with axes ZYXC and shape
(D,H,W,C) using available volumetric image file readers.
The ImageManager class encapsulates the state necessary to:
1. load a volume image file
2. prepare 3D texture data of an appropriate texture storage format
3. prepare 3D bounding box geometry
The ImageManager also understands image file metadata returned by the
image reader to configure voxel aspect ratio for a spatial
interpretation of the volume data.
"""
import os
import numpy as np
import math
from vispy import gloo
from .util import load_and_mangle_image, bin_reduce
from .geometry import make_cube_clipped
class ImageManager (object):
def __init__(self, filename, reform_data=None):
I, self.meta, self.slice_origin = load_and_mangle_image(filename)
voxel_size = I.micron_spacing
try:
view_grid_microns = tuple(map(float, os.getenv('ZYX_VIEW_GRID').split(",")))
assert len(view_grid_microns) == 3
except:
view_grid_microns = (0.25, 0.25, 0.25)
print("Goal is %s micron view grid. Override with ZYX_VIEW_GRID='float,float,float'" % (view_grid_microns,))
view_reduction = tuple(map(lambda vs, ps: max(int(ps/vs), 1), voxel_size, view_grid_microns))
print("Using %s view reduction factor on %s image grid." % (view_reduction, voxel_size))
print("Final %s micron view grid after reduction." % (tuple(map(lambda vs, r: vs*r, voxel_size, view_reduction)),))
if reform_data is not None:
I = reform_data(I, self.meta, view_reduction)
voxel_size = list(map(lambda a, b: a*b, voxel_size, view_reduction))
self.Zaspect = voxel_size[0] / voxel_size[2]
self.data = I
self.last_channels = None
self.channels = None
self.set_view()
def min_pixel_step_size(self, outtexture=None):
if outtexture is not None:
D, H, W, C = outtexture.shape
else:
D, H, W, C = self.data.shape
span = max(W, H, D*self.Zaspect)
return 1./span
def set_view(self, anti_view=None, channels=None):
if anti_view is not None:
self.anti_view = anti_view
if channels is not None:
# use caller-specified sequence of channels
assert type(channels) is tuple
assert len(channels) <= 4
self.channels = channels
else:
            # default to first N channels up to 4 for RGBA direct mapping
self.channels = tuple(range(0, min(self.data.shape[3], 4)))
for c in self.channels:
assert c >= 0
assert c < self.data.shape[3]
def _get_texture3d_format(self):
I0 = self.data
nc = len(self.channels)
if I0.dtype == np.uint8:
bps = 1
elif I0.dtype == np.uint16 or I0.dtype == np.int16:
bps = 2
else:
assert I0.dtype == np.float16 or I0.dtype == np.float32
bps = 2
#bps = 4
print((nc, bps))
return {
(1,1): ('luminance', 'red'),
(1,2): ('luminance', 'r16f'),
(1,4): ('luminance', 'r16f'),
(2,1): ('rg', 'rg'),
(2,2): ('rg', 'rg32f'),
(2,4): ('rg', 'rg32f'),
(3,1): ('rgb', 'rgb'),
(3,2): ('rgb', 'rgb16f'),
(3,4): ('rgb', 'rgb16f'),
(4,1): ('rgba', 'rgba'),
(4,2): ('rgba', 'rgba16f'),
(4,4): ('rgba', 'rgba16f')
}[(nc, bps)]
def get_texture3d(self, outtexture=None):
"""Pack N-channel image data into R, RG, RGB, RGBA Texture3D using self.channels projection.
outtexture:
None: allocate new Texture3D
not None: use existing Texture3D
sets data in outtexture and returns the texture.
"""
I0 = self.data
# choose size for texture data
D, H, W = self.data.shape[0:3]
C = len(self.channels)
if outtexture is None:
format, internalformat = self._get_texture3d_format()
print('allocating texture3D', (D, H, W, C), internalformat)
outtexture = gloo.Texture3D(shape=(D, H, W, C), format=format, internalformat=internalformat)
elif self.last_channels == self.channels:
print('reusing texture')
return outtexture
else:
print('regenerating texture')
print((D, H, W, C), '<-', I0.shape, list(self.channels), I0.dtype)
# normalize for OpenGL [0,1.0] or [0,2**N-1] and zero black-level
maxval = float(I0.max())
minval = float(I0.min())
if maxval > minval:
scale = 1.0/(maxval - minval)
else:
scale = 1.0
if I0.dtype == np.uint8 or I0.dtype == np.int8:
tmpout = np.zeros((D, H, W, C), dtype=np.uint8)
scale *= float(2**8-1)
else:
assert I0.dtype == np.float16 or I0.dtype == np.float32 or I0.dtype == np.uint16 or I0.dtype == np.int16
tmpout = np.zeros((D, H, W, C), dtype=np.uint16 )
scale *= (2.0**16-1)
# pack selected channels into texture
for i in range(C):
tmpout[:,:,:,i] = (I0[:,:,:,self.channels[i]].astype(np.float32) - minval) * scale
self.last_channels = self.channels
outtexture.set_data(tmpout)
return outtexture
def make_cube_clipped(self, dataplane=None):
"""Generate cube clipped against plane equation 4-tuple.
Excludes semi-space beneath plane, i.e. with negative plane
distance. Omitting plane produces regular unclipped cube.
"""
shape = self.data.shape[0:3]
return make_cube_clipped(shape, self.Zaspect, 2, dataplane)
| bsd-3-clause | -1,659,263,990,849,402,400 | 32.943503 | 123 | 0.570406 | false | 3.433143 | false | false | false |
ricardosiri68/patchcap | PatchMan/device.py | 1 | 3252 | from timeit import default_timer as timer
import base64
import logging
import os
import stat
import sys
import time
try:
import urllib.request as urllib2
import urllib.parse as urlparse
except ImportError:
import urllib2
import urlparse
from gi.repository import Gst, GObject
logger = logging.getLogger(__name__)
GObject.threads_init()
Gst.init(None)
class VirtualDevice(Gst.Bin):
__gstmetadata__ = (
'Open device based on condor configuration',
'Video Source',
'quesoy',
'Hernando Rojas <hrojas@lacuatro.com.ar>'
)
def __init__(self, url):
res = urlparse.urlparse(url)
super(VirtualDevice, self).__init__()
if res.scheme == "http":
self.src = Gst.ElementFactory.make('souphttpsrc', 'source')
self.src.set_property("uri", url)
elif res.scheme == "rtsp":
self.src = Gst.ElementFactory.make('rtspsrc', None)
self.src.set_property("location", url)
elif res.scheme == "file" or not res.scheme:
try:
if os.path.isfile(res.path):
self.src = Gst.ElementFactory.make("filesrc", "source")
self.src.set_property("location", res.path)
else:
st = os.stat(res.path)
if stat.S_ISCHR(st.st_mode):
self.src = Gst.ElementFactory.make("v4l2src", "source")
self.src.set_property("device", res.path)
except Exception as e:
self.src = Gst.ElementFactory.make("videotestsrc", "source")
logging.error("unable to parse URL '%s': %s"%(url, e))
self.dec = Gst.ElementFactory.make('decodebin', None)
self.dec.connect('pad-added', self.on_dec_src_pad_added)
self.add(self.src)
self.add(self.dec)
if self.src.get_static_pad('src'):
self.src.link(self.dec)
else:
self.src.connect('pad-added', self.on_src_pad_added)
self.video_pad = Gst.GhostPad.new_no_target("video_pad", Gst.PadDirection.SRC)
self.add_pad(self.video_pad)
#self.video_pad.connect('linked', self.on_deco_pad_linked)
#def on_deco_pad_linked(self, pad, peer):
# pad.add_probe(Gst.PadProbeType.BUFFER, self.rec_buff, 0)
# used to log buffer timestamps
# def rec_buff(self, pad, info, data):
# VirtualDevice.gt[info.get_buffer().pts] = timer()
# return Gst.PadProbeReturn.OK
def on_src_pad_added(self, element, pad):
caps = pad.get_current_caps()
cap = caps.get_structure(0)
if cap.get_string('media')=='video':
pad.link(self.dec.get_static_pad('sink'))
def on_dec_src_pad_added(self, element, pad):
caps = pad.get_current_caps()
if caps.to_string().startswith('video/'):
self.video_pad.set_target(pad)
self.post_message(Gst.Message.new_application(self, caps.get_structure(0)))
def __repr__(self):
return self.__str__()
def __str__(self):
return self.name + "[%s]"%self.src
GObject.type_register(VirtualDevice)
__gstelementfactory__ = ("condordevice", Gst.Rank.NONE, VirtualDevice)
| gpl-2.0 | -5,379,833,639,723,896,000 | 31.19802 | 88 | 0.588561 | false | 3.444915 | false | false | false |