commit
stringlengths
40
40
subject
stringlengths
1
1.49k
old_file
stringlengths
4
311
new_file
stringlengths
4
311
new_contents
stringlengths
1
29.8k
old_contents
stringlengths
0
9.9k
lang
stringclasses
3 values
proba
float64
0
1
ca8dec97321fdf2ceee459b95c3d885edebca15b
Bump DeletionWatcher up to 20 minutes
deletionwatcher.py
deletionwatcher.py
import json import requests import time import websocket from bs4 import BeautifulSoup from threading import Thread from metasmoke import Metasmoke from globalvars import GlobalVars from datahandling import is_false_positive, is_ignored_post, get_post_site_id_link class DeletionWatcher: @classmethod def update_site_id_list(self): soup = BeautifulSoup(requests.get("http://meta.stackexchange.com/topbar/site-switcher/site-list").text) site_id_dict = {} for site in soup.findAll("a", attrs={"data-id": True}): site_name = site["href"][2:] site_id = site["data-id"] site_id_dict[site_name] = site_id GlobalVars.site_id_dict = site_id_dict @classmethod def check_websocket_for_deletion(self, post_site_id, post_url, timeout): time_to_check = time.time() + timeout post_id = post_site_id[0] post_type = post_site_id[2] if post_type == "answer": question_id = str(get_post_site_id_link(post_site_id)) if question_id is None: return else: question_id = post_id post_site = post_site_id[1] if post_site not in GlobalVars.site_id_dict: return site_id = GlobalVars.site_id_dict[post_site] ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/") ws.send(site_id + "-question-" + question_id) while time.time() < time_to_check: ws.settimeout(time_to_check - time.time()) try: a = ws.recv() except websocket.WebSocketTimeoutException: t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False)) t_metasmoke.start() return False if a is not None and a != "": try: d = json.loads(json.loads(a)["data"]) except: continue if d["a"] == "post-deleted" and str(d["qId"]) == question_id and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or post_type == "question"): t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, True)) t_metasmoke.start() return True t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False)) t_metasmoke.start() return False @classmethod def check_if_report_was_deleted(self, post_site_id, post_url, message): was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 1200) if was_report_deleted: try: message.delete() except: pass @classmethod def post_message_if_not_deleted(self, post_site_id, post_url, message_text, room): was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 300) if not was_report_deleted and not is_false_positive(post_site_id[0:2]) and not is_ignored_post(post_site_id[0:2]): room.send_message(message_text)
import json import requests import time import websocket from bs4 import BeautifulSoup from threading import Thread from metasmoke import Metasmoke from globalvars import GlobalVars from datahandling import is_false_positive, is_ignored_post, get_post_site_id_link class DeletionWatcher: @classmethod def update_site_id_list(self): soup = BeautifulSoup(requests.get("http://meta.stackexchange.com/topbar/site-switcher/site-list").text) site_id_dict = {} for site in soup.findAll("a", attrs={"data-id": True}): site_name = site["href"][2:] site_id = site["data-id"] site_id_dict[site_name] = site_id GlobalVars.site_id_dict = site_id_dict @classmethod def check_websocket_for_deletion(self, post_site_id, post_url, timeout): time_to_check = time.time() + timeout post_id = post_site_id[0] post_type = post_site_id[2] if post_type == "answer": question_id = str(get_post_site_id_link(post_site_id)) if question_id is None: return else: question_id = post_id post_site = post_site_id[1] if post_site not in GlobalVars.site_id_dict: return site_id = GlobalVars.site_id_dict[post_site] ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/") ws.send(site_id + "-question-" + question_id) while time.time() < time_to_check: ws.settimeout(time_to_check - time.time()) try: a = ws.recv() except websocket.WebSocketTimeoutException: t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False)) t_metasmoke.start() return False if a is not None and a != "": try: d = json.loads(json.loads(a)["data"]) except: continue if d["a"] == "post-deleted" and str(d["qId"]) == question_id and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or post_type == "question"): t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, True)) t_metasmoke.start() return True t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False)) t_metasmoke.start() return False @classmethod def check_if_report_was_deleted(self, post_site_id, post_url, message): was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 600) if was_report_deleted: try: message.delete() except: pass @classmethod def post_message_if_not_deleted(self, post_site_id, post_url, message_text, room): was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 300) if not was_report_deleted and not is_false_positive(post_site_id[0:2]) and not is_ignored_post(post_site_id[0:2]): room.send_message(message_text)
Python
0
a492e805fa51940d746a1d251232bc4f13417165
fix waftools/man.py to install manpages again.
waftools/man.py
waftools/man.py
import Common, Object, Utils, Node, Params import sys, os import gzip from misc import copyobj def gzip_func(task): env = task.m_env infile = task.m_inputs[0].abspath(env) outfile = task.m_outputs[0].abspath(env) input = open(infile, 'r') output = gzip.GzipFile(outfile, mode='w') output.write(input.read()) return 0 class manobj(copyobj): def __init__(self, section=1, type='none'): copyobj.__init__(self, type) self.fun = gzip_func self.files = [] self.section = section def apply(self): lst = self.to_list(self.files) for file in lst: node = self.path.find_source(file) if not node: fatal('cannot find input file %s for processing' % file) target = self.target if not target or len(lst)>1: target = node.m_name newnode = self.path.find_build(file+'.gz') #target? if not newnode: newnode = Node.Node(file+'.gz', self.path) self.path.append_build(newnode) task = self.create_task('copy', self.env, 8) task.set_inputs(node) task.set_outputs(newnode) task.m_env = self.env task.fun = self.fun if Params.g_commands['install'] or Params.g_commands['uninstall']: Common.install_files('MANDIR', 'man' + str(self.section), newnode.abspath(self.env)) def setup(env): Object.register('man', manobj) def detect(conf): return 1
import Common, Object, Utils, Node, Params import sys, os import gzip from misc import copyobj def gzip_func(task): env = task.m_env infile = task.m_inputs[0].abspath(env) outfile = task.m_outputs[0].abspath(env) input = open(infile, 'r') output = gzip.GzipFile(outfile, mode='w') output.write(input.read()) return 0 class manobj(copyobj): def __init__(self, section=1, type='none'): copyobj.__init__(self, type) self.fun = gzip_func self.files = [] self.section = section def apply(self): lst = self.to_list(self.source) for file in lst: node = self.path.find_source(file) if not node: fatal('cannot find input file %s for processing' % file) target = self.target if not target or len(lst)>1: target = node.m_name newnode = self.path.find_build(file+'.gz') #target? if not newnode: newnode = Node.Node(file+'.gz', self.path) self.path.append_build(newnode) task = self.create_task('copy', self.env, 8) task.set_inputs(node) task.set_outputs(newnode) task.m_env = self.env task.fun = self.fun if Params.g_commands['install'] or Params.g_commands['uninstall']: Common.install_files('MANDIR', 'man' + str(self.section), newnode.abspath(self.env)) def setup(env): Object.register('man', manobj) def detect(conf): return 1
Python
0
f6648a0206258e911c0fe4c9c8d0b8cd1334d119
Test also what it returns from the api_translate view
appcomposer/tests/translator/test_sync.py
appcomposer/tests/translator/test_sync.py
import json from flask import request from mock import patch from appcomposer.login import graasp_oauth_login_redirect from appcomposer.tests.translator.fake_requests import create_requests_mock from appcomposer.tests.utils import ComposerTest from appcomposer.translator.tasks import synchronize_apps_no_cache_wrapper from appcomposer.translator.views import api_translations2, api_translate from appcomposer.translator.mongodb_pusher import mongo_translation_urls, mongo_bundles class TranslatorTest(ComposerTest): def setUp(self): super(TranslatorTest, self).setUp() mongo_translation_urls.remove() mongo_bundles.remove() def assertApp1(self): # Check MongoDB (English and Spanish) resultEngUrl = mongo_translation_urls.find_one({'_id':'en_ALL_ALL::http://url1/languages/en_ALL.xml'}) resultEngApp = mongo_bundles.find_one({'_id':'en_ALL_ALL::http://url1/gadget.xml'}) self.assertEquals(resultEngUrl['data'], resultEngApp['data']) data = json.loads(resultEngUrl['data']) self.assertEquals("Message1_1", data['message1_1']) self.assertEquals("Message2_1", data['message2_1']) self.assertEquals("Message3_1", data['message3_1']) self.assertEquals("Message4_1", data['message4_1']) resultSpaUrl = mongo_translation_urls.find_one({'_id':'es_ALL_ALL::http://url1/languages/en_ALL.xml'}) resultSpaApp = mongo_bundles.find_one({'_id':'es_ALL_ALL::http://url1/gadget.xml'}) self.assertEquals(resultSpaUrl['data'], resultSpaApp['data']) data = json.loads(resultSpaUrl['data']) self.assertEquals("Mensaje1_1", data['message1_1']) self.assertEquals("Mensaje2_1", data['message2_1']) self.assertEquals("Mensaje3_1", data['message3_1']) # This is self-filled by its English version self.assertEquals("Message4_1", data['message4_1']) request.args = {'app_url' : 'http://url1/gadget.xml'} # Check API english_results = api_translate('en_ALL', 'ALL').json self.assertFalse(english_results['automatic']) self.assertEquals(english_results['url'], 'http://url1/gadget.xml') message1_1 = english_results['translation']['message1_1'] self.assertFalse(message1_1['can_edit']) self.assertFalse(message1_1['from_default']) self.assertEquals("Message1_1", message1_1['source']) self.assertEquals("Message1_1", message1_1['target']) # In Spanish, the fourth message is special spanish_results = api_translate('es_ALL', 'ALL').json self.assertFalse(spanish_results['automatic']) self.assertEquals(spanish_results['url'], 'http://url1/gadget.xml') message1_1 = spanish_results['translation']['message1_1'] self.assertFalse(message1_1['can_edit']) self.assertFalse(message1_1['from_default']) self.assertEquals("Message1_1", message1_1['source']) self.assertEquals("Mensaje1_1", message1_1['target']) message4_1 = spanish_results['translation']['message4_1'] self.assertTrue(message4_1['can_edit']) self.assertTrue(message4_1['from_default']) self.assertEquals("Message4_1", message4_1['source']) self.assertEquals("Message4_1", message4_1['target']) # There is no translation to French, so it's automatic french_results = api_translate('fr_ALL', 'ALL').json french_results = api_translate('fr_ALL', 'ALL').json # TODO: this is a bug! # self.assertTrue(french_results['automatic']) self.assertEquals(french_results['url'], 'http://url1/gadget.xml') message1_1 = french_results['translation']['message1_1'] self.assertTrue(message1_1['can_edit']) self.assertFalse(message1_1['from_default']) self.assertEquals("Message1_1", message1_1['source']) self.assertIsNone(message1_1['target']) message4_1 = french_results['translation']['message4_1'] self.assertTrue(message4_1['can_edit']) self.assertFalse(message4_1['from_default']) self.assertEquals("Message4_1", message4_1['source']) self.assertIsNone(message4_1['target']) def assertApps(self): self.assertApp1() class TestSync(TranslatorTest): @patch("appcomposer.translator.utils.get_cached_session") @patch("requests.Session") def test_sync(self, mock_requests, mock_requests_cached_session): mock_requests().get = create_requests_mock() mock_requests_cached_session().get = create_requests_mock() graasp_oauth_login_redirect() synchronize_apps_no_cache_wrapper(None) self.assertApps() synchronize_apps_no_cache_wrapper(None) self.assertApps() @patch("appcomposer.translator.utils.get_cached_session") def test_sync2(self, mock): mock().get = create_requests_mock() synchronize_apps_no_cache_wrapper(None)
import json from mock import patch from appcomposer.login import graasp_oauth_login_redirect from appcomposer.tests.translator.fake_requests import create_requests_mock from appcomposer.tests.utils import ComposerTest from appcomposer.translator.tasks import synchronize_apps_no_cache_wrapper from appcomposer.translator.views import api_translations2 from appcomposer.translator.mongodb_pusher import mongo_translation_urls, mongo_bundles class TranslatorTest(ComposerTest): def setUp(self): super(TranslatorTest, self).setUp() mongo_translation_urls.remove() mongo_bundles.remove() def assertApp1(self): resultEngUrl = mongo_translation_urls.find_one({'_id':'en_ALL_ALL::http://url1/languages/en_ALL.xml'}) resultEngApp = mongo_bundles.find_one({'_id':'en_ALL_ALL::http://url1/gadget.xml'}) self.assertEquals(resultEngUrl['data'], resultEngApp['data']) data = json.loads(resultEngUrl['data']) self.assertEquals('Message1_1', data['message1_1']) self.assertEquals('Message2_1', data['message2_1']) self.assertEquals('Message3_1', data['message3_1']) self.assertEquals('Message4_1', data['message4_1']) resultSpaUrl = mongo_translation_urls.find_one({'_id':'es_ALL_ALL::http://url1/languages/en_ALL.xml'}) resultSpaApp = mongo_bundles.find_one({'_id':'es_ALL_ALL::http://url1/gadget.xml'}) self.assertEquals(resultSpaUrl['data'], resultSpaApp['data']) data = json.loads(resultSpaUrl['data']) self.assertEquals('Mensaje1_1', data['message1_1']) self.assertEquals('Mensaje2_1', data['message2_1']) self.assertEquals('Mensaje3_1', data['message3_1']) # This is self-filled by its English version self.assertEquals('Message4_1', data['message4_1']) class TestSync(TranslatorTest): @patch("appcomposer.translator.utils.get_cached_session") @patch("requests.Session") def test_sync(self, mock_requests, mock_requests_cached_session): mock_requests().get = create_requests_mock() mock_requests_cached_session().get = create_requests_mock() graasp_oauth_login_redirect() synchronize_apps_no_cache_wrapper(None) self.assertApp1() @patch("appcomposer.translator.utils.get_cached_session") def test_sync2(self, mock): mock().get = create_requests_mock() synchronize_apps_no_cache_wrapper(None)
Python
0
89c1b58da23cfe16e8e195c61313b818a6d5f890
Add persist.py
darwin/persist.py
darwin/persist.py
import joblib from .version import __version__, VERSION class PersistenceMixin(object): """ Mixin that adds joblib persistence load and save function to any class. """ @classmethod def from_file(cls, objdump_path): ''' Parameters ---------- objdump_path: str Path to the object dump file. Returns ------- instance New instance of an object from the pickle at the specified path. ''' obj_version, object = joblib.load(objdump_path) # Check that we've actually loaded a PersistenceMixin (or sub-class) if not isinstance(object, cls): raise ValueError(('The pickle stored at {} does not contain ' + 'a {} object.').format(objdump_path, cls)) # Check that versions are compatible. (Currently, this just checks # that major versions match) elif obj_version[0] == VERSION[0]: if not hasattr(object, 'sampler'): object.sampler = None return object else: raise ValueError(("{} stored in pickle file {} was created with version {} " "of {}, which is incompatible with the current version " "{}").format(cls, objdump_path, __name__, '.'.join(obj_version), '.'.join(VERSION))) def load(self, objdump_path): '''Replace the current object instance with a saved object. Parameters ---------- objdump_path: str The path to the file to load. ''' del self.__dict__ self.__dict__ = PersistenceMixin.from_file(objdump_path).__dict__ def save(self, objdump_path): '''Save the object to a file. Parameters ---------- objdump_path: str The path to where you want to save the object. ''' # create the directory if it doesn't exist learner_dir = os.path.dirname(objdump_path) if not os.path.exists(learner_dir): os.makedirs(learner_dir) # write out the files joblib.dump((VERSION, self), objdump_path)
import joblib from .version import __version__, VERSION class PersistenceMixin(object): """ Mixin that adds joblib persistence load and save function to any class. """ @classmethod def from_file(cls, objdump_path): ''' Parameters ---------- objdump_path: str Path to the object dump file. Returns ------- instance New instance of an object from the pickle at the specified path. ''' obj_version, object = joblib.load(objdump_path) # Check that we've actually loaded a PersistenceMixin (or sub-class) if not isinstance(object, cls): raise ValueError(('The pickle stored at {} does not contain ' + 'a {} object.').format(objdump_path, cls)) # Check that versions are compatible. (Currently, this just checks # that major versions match) elif obj_version[0] == VERSION[0]: if not hasattr(object, 'sampler'): object.sampler = None return learner else: raise ValueError(("{} stored in pickle file {} was created with version {} " "of {}, which is incompatible with the current version " "{}").format(cls, objdump_path, __name__, '.'.join(obj_version), '.'.join(VERSION))) def load(self, objdump_path): '''Replace the current object instance with a saved object. Parameters ---------- objdump_path: str The path to the file to load. ''' del self.__dict__ self.__dict__ = Learner.from_file(objdump_path).__dict__ def save(self, objdump_path): '''Save the learner to a file. Parameters ---------- objdump_path: str The path to where you want to save the learner. ''' # create the directory if it doesn't exist learner_dir = os.path.dirname(objdump_path) if not os.path.exists(learner_dir): os.makedirs(learner_dir) # write out the files joblib.dump((VERSION, self), objdump_path)
Python
0.000001
735a52b8ad4ebf7b6b8bb47e14667cd9004e624b
add some mappings
algo/lru.py
algo/lru.py
mapping = {} class Node: def __init__(self, val): self.next = None self.prev = None self.value = val class DoublyLinkedList: def __init__(self): self.head = None def insert(self, val): node = Node(val) mapping[val] = node head = self.head if self.head == None: self.head = node else: while head.next != None: head = head.next head.next = node node.prev = head def print_list(self): head = self.head while head != None: print head.value head = head.next if __name__ == '__main__': dll = DoublyLinkedList() for i in range(10): dll.insert(i)
class Node: def __init__(self, val): self.next = None self.prev = None self.value = val class DoublyLinkedList: def __init__(self): self.head = None def insert(self, val): node = Node(val) head = self.head if self.head == None: self.head = node else: while head.next != None: head = head.next head.next = node node.prev = head def print_list(self): head = self.head while head != None: print head.value head = head.next if __name__ == '__main__': dll = DoublyLinkedList() for i in range(10): dll.insert(i)
Python
0.000011
6bb58e13b657c1546f4f5d1afa70d48a9187f168
Update server.py
gprs/server.py
gprs/server.py
from socket import * from modules import decode_packet import sys from modules import params Parser = params.Parser() argv = Parser.createParser() ip_and_port = argv.parse_args(sys.argv[1:]) #host = ip_and_port.ip #port = int(ip_and_port.port) host = "0.0.0.0" port = 5100 addr = (host, port) print(host,port) tcp_socket = socket(AF_INET, SOCK_STREAM) tcp_socket.bind(addr) tcp_socket.listen(10) loop = True while loop: data = None print('wait connection...') conn, addr = tcp_socket.accept() while loop: f = open('logs/gprs.log', 'a+') data = conn.recv(109) decode_packet.insert(data) print(data) if data: f.write(str(data)) f.close() else: f.close() break conn.close() tcp_socket.close()
from socket import * from modules import decode_packet import sys from modules import params Parser = params.Parser() argv = Parser.createParser() ip_and_port = argv.parse_args(sys.argv[1:]) #host = ip_and_port.ip #port = int(ip_and_port.port) host = "0.0.0.0" port = 5300 addr = (host, port) print(host,port) tcp_socket = socket(AF_INET, SOCK_STREAM) tcp_socket.bind(addr) tcp_socket.listen(10) loop = True while loop: data = None print('wait connection...') conn, addr = tcp_socket.accept() while loop: f = open('logs/gprs.log', 'a+') data = conn.recv(109) decode_packet.insert(data) print(data) if data: f.write(str(data)) f.close() else: f.close() break conn.close() tcp_socket.close()
Python
0.000001
9e5b42fa14b50d91840a67646ed6779d8f5c22ae
Make ``cursor_kinds`` private
bears/c_languages/ClangComplexityBear.py
bears/c_languages/ClangComplexityBear.py
from clang.cindex import Index, CursorKind from coalib.bears.LocalBear import LocalBear from coalib.results.Result import Result from coalib.results.SourceRange import SourceRange from bears.c_languages.ClangBear import clang_available, ClangBear class ClangComplexityBear(LocalBear): """ Calculates cyclomatic complexity of each function and displays it to the user. """ LANGUAGES = ClangBear.LANGUAGES REQUIREMENTS = ClangBear.REQUIREMENTS AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'coala-devel@googlegroups.com'} LICENSE = 'AGPL-3.0' CAN_DETECT = {'Complexity'} check_prerequisites = classmethod(clang_available) _decisive_cursor_kinds = { CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT, CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT} def function_key_points(self, cursor, top_function_level=False): """ Calculates number of function's decision points and exit points. :param top_function_level: Whether cursor is in the top level of the function. """ decisions, exits = 0, 0 for child in cursor.get_children(): if child.kind in self._decisive_cursor_kinds: decisions += 1 elif child.kind == CursorKind.RETURN_STMT: exits += 1 if top_function_level: # There is no point to move forward, so just return. return decisions, exits child_decisions, child_exits = self.function_key_points(child) decisions += child_decisions exits += child_exits if top_function_level: # Implicit return statement. exits += 1 return decisions, exits def complexities(self, cursor, filename): """ Calculates cyclomatic complexities of functions. """ file = cursor.location.file if file is not None and file.name != filename: # There is nothing to do in another file. return if cursor.kind == CursorKind.FUNCTION_DECL: child = next((child for child in cursor.get_children() if child.kind != CursorKind.PARM_DECL), None) if child: decisions, exits = self.function_key_points(child, True) complexity = max(1, decisions - exits + 2) yield cursor, complexity else: for child in cursor.get_children(): yield from self.complexities(child, filename) def run(self, filename, file, max_complexity: int=8): """ Check for all functions if they are too complicated using the cyclomatic complexity metric. You can read more about this metric at <https://www.wikiwand.com/en/Cyclomatic_complexity>. :param max_complexity: Maximum cyclomatic complexity that is considered to be normal. The value of 10 had received substantial corroborating evidence. But the general recommendation: "For each module, either limit cyclomatic complexity to [the agreed-upon limit] or provide a written explanation of why the limit was exceeded." """ root = Index.create().parse(filename).cursor for cursor, complexity in self.complexities(root, filename): if complexity > max_complexity: affected_code = (SourceRange.from_clang_range(cursor.extent),) yield Result( self, "The function '{function}' should be simplified. Its " "cyclomatic complexity is {complexity} which exceeds " "maximal recommended value " "of {rec_value}.".format( function=cursor.displayname, complexity=complexity, rec_value=max_complexity), affected_code=affected_code, additional_info=( "The cyclomatic complexity is a metric that measures " "how complicated a function is by counting branches " "and exits of each function.\n\n" "Your function seems to be complicated and should be " "refactored so that it can be understood by other " "people easily.\n\nSee " "<http://www.wikiwand.com/en/Cyclomatic_complexity>" " for more information."))
from clang.cindex import Index, CursorKind from coalib.bears.LocalBear import LocalBear from coalib.results.Result import Result from coalib.results.SourceRange import SourceRange from bears.c_languages.ClangBear import clang_available, ClangBear class ClangComplexityBear(LocalBear): """ Calculates cyclomatic complexity of each function and displays it to the user. """ LANGUAGES = ClangBear.LANGUAGES REQUIREMENTS = ClangBear.REQUIREMENTS AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'coala-devel@googlegroups.com'} LICENSE = 'AGPL-3.0' CAN_DETECT = {'Complexity'} check_prerequisites = classmethod(clang_available) decisive_cursor_kinds = { CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT, CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT} def function_key_points(self, cursor, top_function_level=False): """ Calculates number of function's decision points and exit points. :param top_function_level: Whether cursor is in the top level of the function. """ decisions, exits = 0, 0 for child in cursor.get_children(): if child.kind in self.decisive_cursor_kinds: decisions += 1 elif child.kind == CursorKind.RETURN_STMT: exits += 1 if top_function_level: # There is no point to move forward, so just return. return decisions, exits child_decisions, child_exits = self.function_key_points(child) decisions += child_decisions exits += child_exits if top_function_level: # Implicit return statement. exits += 1 return decisions, exits def complexities(self, cursor, filename): """ Calculates cyclomatic complexities of functions. """ file = cursor.location.file if file is not None and file.name != filename: # There is nothing to do in another file. return if cursor.kind == CursorKind.FUNCTION_DECL: child = next((child for child in cursor.get_children() if child.kind != CursorKind.PARM_DECL), None) if child: decisions, exits = self.function_key_points(child, True) complexity = max(1, decisions - exits + 2) yield cursor, complexity else: for child in cursor.get_children(): yield from self.complexities(child, filename) def run(self, filename, file, max_complexity: int=8): """ Check for all functions if they are too complicated using the cyclomatic complexity metric. You can read more about this metric at <https://www.wikiwand.com/en/Cyclomatic_complexity>. :param max_complexity: Maximum cyclomatic complexity that is considered to be normal. The value of 10 had received substantial corroborating evidence. But the general recommendation: "For each module, either limit cyclomatic complexity to [the agreed-upon limit] or provide a written explanation of why the limit was exceeded." """ root = Index.create().parse(filename).cursor for cursor, complexity in self.complexities(root, filename): if complexity > max_complexity: affected_code = (SourceRange.from_clang_range(cursor.extent),) yield Result( self, "The function '{function}' should be simplified. Its " "cyclomatic complexity is {complexity} which exceeds " "maximal recommended value " "of {rec_value}.".format( function=cursor.displayname, complexity=complexity, rec_value=max_complexity), affected_code=affected_code, additional_info=( "The cyclomatic complexity is a metric that measures " "how complicated a function is by counting branches " "and exits of each function.\n\n" "Your function seems to be complicated and should be " "refactored so that it can be understood by other " "people easily.\n\nSee " "<http://www.wikiwand.com/en/Cyclomatic_complexity>" " for more information."))
Python
0
22952f57c33070f83c4e9c38b2a96543ed983f4e
Make ndb_persistence execute Context's complete event
furious/extras/appengine/ndb_persistence.py
furious/extras/appengine/ndb_persistence.py
# # Copyright 2014 WebFilings, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This module contains the default functions to use when performing persistence operations backed by the App Engine ndb library. """ import logging from google.appengine.ext import ndb class FuriousContextNotFoundError(Exception): """FuriousContext entity not found in the datastore.""" class FuriousContext(ndb.Model): context = ndb.JsonProperty(indexed=False, compressed=True) @classmethod def from_context(cls, context): """Create a `cls` entity from a context.""" return cls(id=context.id, context=context.to_dict()) @classmethod def from_id(cls, id): """Load a `cls` entity and instantiate the Context it stores.""" from furious.context import Context # TODO: Handle exceptions and retries here. entity = cls.get_by_id(id) if not entity: raise FuriousContextNotFoundError( "Context entity not found for: {}".format(id)) return Context.from_dict(entity.context) class FuriousAsyncMarker(ndb.Model): """This entity serves as a 'complete' marker.""" pass def context_completion_checker(async): """Check if all Async jobs within a Context have been run.""" context_id = async.context_id logging.debug("Check completion for: %s", context_id) context = FuriousContext.from_id(context_id) logging.debug("Loaded context.") task_ids = context.task_ids logging.debug(task_ids) offset = 10 for index in xrange(0, len(task_ids), offset): keys = [ndb.Key(FuriousAsyncMarker, id) for id in task_ids[index:index + offset]] markers = ndb.get_multi(keys) if not all(markers): logging.debug("Not all Async's complete") return False logging.debug("All Async's complete!!") context.exec_event_handler('complete') return True def store_context(context): """Persist a Context object to the datastore.""" logging.debug("Attempting to store Context %s.", context.id) entity = FuriousContext.from_context(context) # TODO: Handle exceptions and retries here. key = entity.put() logging.debug("Stored Context with key: %s.", key) def store_async_result(async): """Persist the Async's result to the datastore.""" logging.debug("Storing result for %s", async) pass def store_async_marker(async): """Persist a marker indicating the Async ran to the datastore.""" logging.debug("Attempting to mark Async %s complete.", async.id) # TODO: Handle exceptions and retries here. key = FuriousAsyncMarker(id=async.id).put() logging.debug("Marked Async complete using marker: %s.", key)
# # Copyright 2014 WebFilings, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This module contains the default functions to use when performing persistence operations backed by the App Engine ndb library. """ import logging from google.appengine.ext import ndb class FuriousContextNotFoundError(Exception): """FuriousContext entity not found in the datastore.""" class FuriousContext(ndb.Model): context = ndb.JsonProperty(indexed=False, compressed=True) @classmethod def from_context(cls, context): """Create a `cls` entity from a context.""" return cls(id=context.id, context=context.to_dict()) @classmethod def from_id(cls, id): """Load a `cls` entity and instantiate the Context it stores.""" from furious.context import Context # TODO: Handle exceptions and retries here. entity = cls.get_by_id(id) if not entity: raise FuriousContextNotFoundError( "Context entity not found for: {}".format(id)) return Context.from_dict(entity.context) class FuriousAsyncMarker(ndb.Model): """This entity serves as a 'complete' marker.""" pass def context_completion_checker(async): """Check if all Async jobs within a Context have been run.""" context_id = async.context_id logging.debug("Check completion for: %s", context_id) context = FuriousContext.from_id(context_id) logging.debug("Loaded context.") task_ids = context.task_ids logging.debug(task_ids) offset = 10 for index in xrange(0, len(task_ids), offset): keys = [ndb.Key(FuriousAsyncMarker, id) for id in task_ids[index:index + offset]] markers = ndb.get_multi(keys) if not all(markers): logging.debug("Not all Async's complete") return False logging.debug("All Async's complete!!") return True def store_context(context): """Persist a Context object to the datastore.""" logging.debug("Attempting to store Context %s.", context.id) entity = FuriousContext.from_context(context) # TODO: Handle exceptions and retries here. key = entity.put() logging.debug("Stored Context with key: %s.", key) def store_async_result(async): """Persist the Async's result to the datastore.""" logging.debug("Storing result for %s", async) pass def store_async_marker(async): """Persist a marker indicating the Async ran to the datastore.""" logging.debug("Attempting to mark Async %s complete.", async.id) # TODO: Handle exceptions and retries here. key = FuriousAsyncMarker(id=async.id).put() logging.debug("Marked Async complete using marker: %s.", key)
Python
0.000009
d428f6df195c0293340089b884b934fa16ef7ff6
Use local timezone if available. Fixes #3
wanikani/cli.py
wanikani/cli.py
import argparse import logging import os # If the tzlocal package is installed, then we will help the user out # and print things out in the local timezone LOCAL_TIMEZONE = None try: import tzlocal LOCAL_TIMEZONE = tzlocal.get_localzone() except ImportError: pass from wanikani.core import WaniKani, Radical, Kanji, Vocabulary CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani') logger = logging.getLogger(__name__) def config(): if os.path.exists(CONFIG_PATH): logger.debug('Loading config from %s', CONFIG_PATH) with open(CONFIG_PATH) as f: return f.read().strip() return '' def main(): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() # Global Options parser.add_argument('-a', '--api-key', default=config()) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.WARNING ) def profile(client, args): p = client.profile() print 'Username:', p['username'] print 'Level:', p['level'] profile.parser = subparsers.add_parser('profile') profile.parser.set_defaults(func=profile) def level_progress(client, args): p = client.level_progress() print p['user_information']['username'], 'level', p['user_information']['level'] print 'Radicals:', p['radicals_total'] print 'Kanji:', p['kanji_total'] level_progress.parser = subparsers.add_parser('progress') level_progress.parser.set_defaults(func=level_progress) def recent_unlocks(client, args): p = client.recent_unlocks() print p['user_information']['username'], 'level', p['user_information']['level'] for item in p['items']: print item['level'], item['character'] recent_unlocks.parser = subparsers.add_parser('unlocks') recent_unlocks.parser.set_defaults(func=recent_unlocks) def upcoming(client, args): queue = client.upcoming() for ts in sorted(queue): if len(queue[ts]): radicals, kanji, vocab, total = 0, 0, 0, 0 for obj in queue[ts]: total += 1 if isinstance(obj, Radical): radicals += 1 if isinstance(obj, Kanji): kanji += 1 if isinstance(obj, Vocabulary): vocab += 1 if LOCAL_TIMEZONE: ts.replace(tzinfo=LOCAL_TIMEZONE) # Note the trailing commas, # We only want a newline for the last one print ts, print 'Total:', total, print 'Radials:', radicals, print 'Kanji:', kanji, print 'Vocab:', vocab upcoming.parser = subparsers.add_parser('upcoming') upcoming.parser.set_defaults(func=upcoming) def set_key(client, args): with open(CONFIG_PATH, 'w') as f: f.write(args.api_key) print 'Wrote {0} to {1}'.format(args.api_key, CONFIG_PATH) set_key.parser = subparsers.add_parser('set_key') set_key.parser.set_defaults(func=set_key) set_key.parser.add_argument('api_key',help="New API Key") args = parser.parse_args() logging.basicConfig(level=args.debug) client = WaniKani(args.api_key) args.func(client, args)
import argparse import logging import os from wanikani.core import WaniKani, Radical, Kanji, Vocabulary CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani') logger = logging.getLogger(__name__) def config(): if os.path.exists(CONFIG_PATH): logger.debug('Loading config from %s', CONFIG_PATH) with open(CONFIG_PATH) as f: return f.read().strip() return '' def main(): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() # Global Options parser.add_argument('-a', '--api-key', default=config()) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.WARNING ) def profile(client, args): p = client.profile() print 'Username:', p['username'] print 'Level:', p['level'] profile.parser = subparsers.add_parser('profile') profile.parser.set_defaults(func=profile) def level_progress(client, args): p = client.level_progress() print p['user_information']['username'], 'level', p['user_information']['level'] print 'Radicals:', p['radicals_total'] print 'Kanji:', p['kanji_total'] level_progress.parser = subparsers.add_parser('progress') level_progress.parser.set_defaults(func=level_progress) def recent_unlocks(client, args): p = client.recent_unlocks() print p['user_information']['username'], 'level', p['user_information']['level'] for item in p['items']: print item['level'], item['character'] recent_unlocks.parser = subparsers.add_parser('unlocks') recent_unlocks.parser.set_defaults(func=recent_unlocks) def upcoming(client, args): queue = client.upcoming() for ts in sorted(queue): if len(queue[ts]): radicals, kanji, vocab, total = 0, 0, 0, 0 for obj in queue[ts]: total += 1 if isinstance(obj, Radical): radicals += 1 if isinstance(obj, Kanji): kanji += 1 if isinstance(obj, Vocabulary): vocab += 1 # Note the trailing commas, # We only want a newline for the last one print ts, print 'Total:', total, print 'Radials:', radicals, print 'Kanji:', kanji, print 'Vocab:', vocab upcoming.parser = subparsers.add_parser('upcoming') upcoming.parser.set_defaults(func=upcoming) def set_key(client, args): with open(CONFIG_PATH, 'w') as f: f.write(args.api_key) print 'Wrote {0} to {1}'.format(args.api_key, CONFIG_PATH) set_key.parser = subparsers.add_parser('set_key') set_key.parser.set_defaults(func=set_key) set_key.parser.add_argument('api_key',help="New API Key") args = parser.parse_args() logging.basicConfig(level=args.debug) client = WaniKani(args.api_key) args.func(client, args)
Python
0.000001
b53bee8978c6fe407fce7769e16ac4991e36fcda
Return unknown status if geolocation API is unavailable
client/plugins/geolocation.py
client/plugins/geolocation.py
#!/usr/bin/env python3 import pickle import json import os import re import requests import subprocess import sys from qlmdm import top_dir, var_dir from qlmdm.client import get_setting cache_file = os.path.join(var_dir, 'geolocation.cache') os.chdir(top_dir) def unknown(): print(json.dumps('unknown')) sys.exit() def old_data_is_good(old_data, ip_addresses, access_points): if 'response' not in old_data: return False try: old_ip_addresses = set(old_data['ip_addresses'].values()) except: old_ip_addresses = set() new_ip_addresses = set(ip_addresses.values()) if old_ip_addresses != new_ip_addresses: return False new_mac_addresses = set(a['macAddress'] for a in access_points) if not new_mac_addresses: return True try: old_mac_addresses = set(a['macAddress'] for a in old_data['access_points']) except: old_mac_addresses = set() percentage_overlap = (100 * len(new_mac_addresses & old_mac_addresses) / len(new_mac_addresses)) if percentage_overlap > 74: return True return False api_key = get_setting('geolocation_api_key') if not api_key: unknown() address_re = re.compile( r'\bAddress:\s*([0-9a-f][0-9a-f](?::[0-9a-f][0-9a-f])*)', re.IGNORECASE) signal_re = re.compile(r'\bSignal level=(-\d+)\d*dBm') channel_re = re.compile(r'\bChannel:\s*(\d+)') access_points = {} ip_addresses = json.loads( subprocess.check_output('client/plugins/ip_addresses.py').decode('ascii')) try: old_data = pickle.load(open(cache_file, 'rb')) except: old_data = {} # iwlist returns slightly different results every time, so we need to run it # several times and merge the output. for i in range(5): try: output = subprocess.check_output( ('iwlist', 'scan'), stderr=subprocess.STDOUT).decode('ascii') except: unknown() for cell in re.split(r'\n\s+Cell \d+ ', output): ap = {} match = address_re.search(cell) if not match: continue ap['macAddress'] = match.group(1).lower() match = signal_re.search(cell) if match: ap['signalStrength'] = match.group(1) match = channel_re.search(cell) if match: ap['channel'] = match.group(1) access_points[ap['macAddress']] = ap # To conserve API quota, don't submit if WiFi access points match the last # call's 75% or more and the IP addresses haven't changed. if old_data_is_good(old_data, ip_addresses, access_points.values()): sys.stderr.write('Using old data\n') print(json.dumps(old_data['response'])) sys.exit() data = {} if access_points: data['wifiAccessPoints'] = list(access_points.values()) url = 'https://www.googleapis.com/geolocation/v1/geolocate?key={}'.format( api_key) try: response = requests.post(url, data=json.dumps(data), timeout=5) response.raise_for_status() except: unknown() old_data = { 'response': response.json(), 'ip_addresses': ip_addresses, 'access_points': access_points, } pickle.dump(old_data, open(cache_file, 'wb')) print(json.dumps(response.json()))
#!/usr/bin/env python3 import pickle import json import os import re import requests import subprocess import sys from qlmdm import top_dir, var_dir from qlmdm.client import get_setting cache_file = os.path.join(var_dir, 'geolocation.cache') os.chdir(top_dir) def unknown(): print(json.dumps('unknown')) sys.exit() def old_data_is_good(old_data, ip_addresses, access_points): if 'response' not in old_data: return False try: old_ip_addresses = set(old_data['ip_addresses'].values()) except: old_ip_addresses = set() new_ip_addresses = set(ip_addresses.values()) if old_ip_addresses != new_ip_addresses: return False new_mac_addresses = set(a['macAddress'] for a in access_points) if not new_mac_addresses: return True try: old_mac_addresses = set(a['macAddress'] for a in old_data['access_points']) except: old_mac_addresses = set() percentage_overlap = (100 * len(new_mac_addresses & old_mac_addresses) / len(new_mac_addresses)) if percentage_overlap > 74: return True return False api_key = get_setting('geolocation_api_key') if not api_key: unknown() address_re = re.compile( r'\bAddress:\s*([0-9a-f][0-9a-f](?::[0-9a-f][0-9a-f])*)', re.IGNORECASE) signal_re = re.compile(r'\bSignal level=(-\d+)\d*dBm') channel_re = re.compile(r'\bChannel:\s*(\d+)') access_points = {} ip_addresses = json.loads( subprocess.check_output('client/plugins/ip_addresses.py').decode('ascii')) try: old_data = pickle.load(open(cache_file, 'rb')) except: old_data = {} # iwlist returns slightly different results every time, so we need to run it # several times and merge the output. for i in range(5): try: output = subprocess.check_output( ('iwlist', 'scan'), stderr=subprocess.STDOUT).decode('ascii') except: unknown() for cell in re.split(r'\n\s+Cell \d+ ', output): ap = {} match = address_re.search(cell) if not match: continue ap['macAddress'] = match.group(1).lower() match = signal_re.search(cell) if match: ap['signalStrength'] = match.group(1) match = channel_re.search(cell) if match: ap['channel'] = match.group(1) access_points[ap['macAddress']] = ap # To conserve API quota, don't submit if WiFi access points match the last # call's 75% or more and the IP addresses haven't changed. if old_data_is_good(old_data, ip_addresses, access_points.values()): sys.stderr.write('Using old data\n') print(json.dumps(old_data['response'])) sys.exit() data = {} if access_points: data['wifiAccessPoints'] = list(access_points.values()) url = 'https://www.googleapis.com/geolocation/v1/geolocate?key={}'.format( api_key) response = requests.post(url, data=json.dumps(data)) try: response.raise_for_status() except: unknown() old_data = { 'response': response.json(), 'ip_addresses': ip_addresses, 'access_points': access_points, } pickle.dump(old_data, open(cache_file, 'wb')) print(json.dumps(response.json()))
Python
0.000002
85775847e93b35ac19e09962bc2b10f9be666e33
Update analysis.py with new finallist.py method
analysis.py
analysis.py
import random import linecache from unidecode import unidecode # Process links into list finallist = [None] * 5716809 with open('links-simple-sorted.txt', 'r') as src: for line in src: [oNode, dNode] = line.split(':') finallist[int(oNode)] = dNode.rstrip('\n')[1:] # ACTUALLY: pick a random line in links-sorted, and translate the numbers from there # Get a random node, and pull that line from the links doc; want this to be an option oNode = random.randint(1,5706070) dNode = finallist[oNode] dNode = dNode.split(' ') # Translate these into titles and print the result oname = linecache.getline('titles-sorted.txt',int(oNode)) oname = oname[:-1] # Gets rid of the trailing newline print '\nORIGIN NODE: ' + oname + '\n' print 'DESTINATION NODES:' for thisnum in dNode: dname = linecache.getline('titles-sorted.txt',int(thisnum))[:-1] print ' ' + dname print '\n'
import random import linecache from unidecode import unidecode # ACTUALLY: pick a random line in links-sorted, and translate the numbers from there # Get a random node, and pull that line from the links doc––want this to be an option # Pull from links because some titles don't have link lines lineno = random.randint(1,5706070) linestr = linecache.getline('links-simple-sorted.txt',lineno) # Process the string to split the "from" and "to" numbers [origin, dest] = linestr.split(':') dest = dest[1:-1] # Gets rid of the first space and trailing newline dest = dest.split(' ') # Split at spaces # Translate these into title oname = lincache.getline('titles-sorted.txt',int(origin)) oname = oname[:-1] # Gets rid of the trailing newline UNIoname = unidecode(u oname) for thisnum in dest: dname = linecache.getline('titles-sorted.txt',int(thisnum))[:-1] UNIdname = unidecode(linecache.getline('titles-sorted.txt', int(thisnum))[:-1]) # Get some stats bro linksout = len(dest) # To get linksin need an adjacency matrix def assemblematrix(): # Something with links-simple-sorted.txt # Parse that shit in def linksin(node): # Locations of value "1" in the row int(node) def linksout(node): # Locations of value "1" in the col int(node)
Python
0
6a3f0ade1d8fe16eeda6d339220b7ef877b402e5
Add no-break options
LFI.TESTER.py
LFI.TESTER.py
''' @KaiyiZhang Github ''' import sys import urllib2 import getopt import time target = '' depth = 6 file = 'etc/passwd' html = '' prefix = '' url = '' keyword = 'root' force = False def usage(): print "LFI.Tester.py Help:" print "Usage: LFI.TESTER.py -t [-d] [-f] [-k]" print " -t,--target The test url" print " -d,--depth The depth for test (Default is 6)" print " -f,--file The File include (Default is etc/passwd)" print " -k,--keyword the keyword for vuln check (Default is root)" try: if len(sys.argv) < 2: usage() sys.exit() opts,args = getopt.getopt(sys.argv[1:],"ht:d:f:k:n",["help","target=","depth=","file=","keyword=","no-break"]) for opt, arg in opts: if opt in("-h","--help"): usage() sys.exit() if opt in("-t","--target"): target = arg if not target.startswith('http://', 0, 7): target = 'http://' + target if opt in("-d","--depth"): depth = int(arg) if depth < 1: usage() sys.exit() if opt in("-f","--file"): file = arg if file.startswith('/',0,1): file =file[1:] if opt in("-k","--keyword"): keyword = arg #print keyword if opt in("-n","--no-break"): force = True except getopt.GetoptError: usage() sys.exit(2) for i in range(0,depth): prefix += '../' url = target + prefix + file print "Testing: ",url try: response = urllib2.urlopen(url) #print response.info() html = response.read() #print html except: pass if keyword in html: print url, " is Vulnerable" if not force: break else: continue else: time.sleep(2) continue
''' @KaiyiZhang Github ''' import sys import urllib2 import getopt import time target = '' depth = 6 file = 'etc/passwd' html = '' prefix = '' url = '' keyword='root' def usage(): print "LFI.Tester.py Help:" print "Usage: LFI.TESTER.py -t [-d] [-f] [-k]" print " -t,--target The test url" print " -d,--depth The depth for test (Default is 6)" print " -f,--file The File include (Default is etc/passwd)" print " -k,--keyword the keyword for vuln check (Default is root)" try: if len(sys.argv) < 2: usage() sys.exit() opts,args = getopt.getopt(sys.argv[1:],"ht:d:f:k:",["help","target=","depth=","file=","keyword="]) for opt, arg in opts: if opt in("-h","--help"): usage() sys.exit() if opt in("-t","--target"): target = arg if not target.startswith('http://', 0, 7): target = 'http://' + target if opt in("-d","--depth"): depth = int(arg) if depth < 1: usage() sys.exit() if opt in("-f","--file"): file = arg if file.startswith('/',0,1): file =file[1:] if opt in("-k","--keyword"): keyword = arg #print keyword except getopt.GetoptError: usage() sys.exit(2) for i in range(0,depth): prefix += '../' url = target + prefix + file print "Testing: ",url try: response = urllib2.urlopen(url) #print response.info() html = response.read() #print html except: pass if(keyword in html): print url, " is Vulnerable" break else: time.sleep(2) continue
Python
0.998376
68c0c054e5b9874f8a6423c35fb83c9de351b9e0
fix doc build
examples/plot_benktander.py
examples/plot_benktander.py
""" ==================================================================== Benktander: Relationship between Chainladder and BornhuetterFerguson ==================================================================== This example demonstrates the relationship between the Chainladder and BornhuetterFerguson methods by way fo the Benktander model. Each is a special case of the Benktander model where ``n_iters = 1`` for BornhuetterFerguson and as ``n_iters`` approaches infinity yields the chainladder. As ``n_iters`` increases the apriori selection becomes less relevant regardless of initial choice. """ import chainladder as cl # Load Data clrd = cl.load_sample('clrd') medmal_paid = clrd.groupby('LOB').sum().loc['medmal', 'CumPaidLoss'] medmal_prem = clrd.groupby('LOB').sum().loc['medmal', 'EarnedPremDIR'].latest_diagonal # Generate LDFs and Tail Factor medmal_paid = cl.Development().fit_transform(medmal_paid) medmal_paid = cl.TailCurve().fit_transform(medmal_paid) # Benktander Model benk = cl.Benktander() # Prep Benktander Grid Search with various assumptions, and a scoring function param_grid = dict(n_iters=list(range(1,100,2)), apriori=[0.50, 0.75, 1.00]) scoring = {'IBNR':lambda x: x.ibnr_.sum()} grid = cl.GridSearch(benk, param_grid, scoring=scoring) # Perform Grid Search grid.fit(medmal_paid, sample_weight=medmal_prem) # Plot data grid.results_.pivot(index='n_iters', columns='apriori', values='IBNR').plot( title='Benktander convergence to Chainladder', grid=True).set(ylabel='IBNR')
""" ==================================================================== Benktander: Relationship between Chainladder and BornhuetterFerguson ==================================================================== This example demonstrates the relationship between the Chainladder and BornhuetterFerguson methods by way fo the Benktander model. Each is a special case of the Benktander model where ``n_iters = 1`` for BornhuetterFerguson and as ``n_iters`` approaches infinity yields the chainladder. As ``n_iters`` increases the apriori selection becomes less relevant regardless of initial choice. """ import chainladder as cl # Load Data clrd = cl.load_sample('clrd') medmal_paid = clrd.groupby('LOB').sum().loc['medmal', 'CumPaidLoss'] medmal_prem = clrd.groupby('LOB').sum().loc['medmal', 'EarnedPremDIR'].latest_diagonal medmal_prem.rename('development', ['premium']) # Generate LDFs and Tail Factor medmal_paid = cl.Development().fit_transform(medmal_paid) medmal_paid = cl.TailCurve().fit_transform(medmal_paid) # Benktander Model benk = cl.Benktander() # Prep Benktander Grid Search with various assumptions, and a scoring function param_grid = dict(n_iters=list(range(1,100,2)), apriori=[0.50, 0.75, 1.00]) scoring = {'IBNR':lambda x: x.ibnr_.sum()} grid = cl.GridSearch(benk, param_grid, scoring=scoring) # Perform Grid Search grid.fit(medmal_paid, sample_weight=medmal_prem) # Plot data grid.results_.pivot(index='n_iters', columns='apriori', values='IBNR').plot( title='Benktander convergence to Chainladder', grid=True).set(ylabel='IBNR')
Python
0
15307ebe2c19c1a3983b0894152ba81fdde34619
Add comment on dist of first function
exp/descriptivestats.py
exp/descriptivestats.py
import pandas import numpy import matplotlib.pyplot as plt def univariate_stats(): # Generate 1000 random numbers from a normal distribution num_examples = 1000 z = pandas.Series(numpy.random.randn(num_examples)) # Minimum print(z.min()) # Maximum print(z.max()) # Mean print(z.mean()) # Median print(z.median()) # Variance print(z.var()) # Standard deviation print(z.std()) # Mean absolute deviation print(z.mad()) # Interquartile range print(z.quantile(0.75) - z.quantile(0.25)) z.plot(kind="hist") def multivariate_stats(): num_examples = 1000 x = pandas.Series(numpy.random.randn(num_examples)) y = x + pandas.Series(numpy.random.randn(num_examples)) z = x + pandas.Series(numpy.random.randn(num_examples)) # Covariance print(y.cov(z)) # Covariance of y with itself is equal to variance print(y.cov(y), y.var()) # Correlation print(y.corr(z)) univariate_stats() multivariate_stats() plt.show()
import pandas import numpy import matplotlib.pyplot as plt def univariate_stats(): num_examples = 1000 z = pandas.Series(numpy.random.randn(num_examples)) # Minimum print(z.min()) # Maximum print(z.max()) # Mean print(z.mean()) # Median print(z.median()) # Variance print(z.var()) # Standard deviation print(z.std()) # Mean absolute deviation print(z.mad()) # Interquartile range print(z.quantile(0.75) - z.quantile(0.25)) z.plot(kind="hist") def multivariate_stats(): num_examples = 1000 x = pandas.Series(numpy.random.randn(num_examples)) y = x + pandas.Series(numpy.random.randn(num_examples)) z = x + pandas.Series(numpy.random.randn(num_examples)) # Covariance print(y.cov(z)) # Covariance of y with itself is equal to variance print(y.cov(y), y.var()) # Correlation print(y.corr(z)) univariate_stats() multivariate_stats() plt.show()
Python
0
7ff6a0dc3a4f6f1ed47f999340f25fe3d5546bd4
fix command order in shell help test
tests/ps_schedstatistics/tests/01-run.py
tests/ps_schedstatistics/tests/01-run.py
#!/usr/bin/env python3 # Copyright (C) 2017 Inria # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. import sys from testrunner import run PS_EXPECTED = ( (r'\tpid | name | state Q | pri | stack \( used\) | ' r'base addr | current | runtime | switches'), (r'\t - | isr_stack | - - | - | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+'), (r'\t 1 | idle | pending Q | 15 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 2 | main | running Q | 7 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 3 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 4 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 5 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 6 | thread | bl mutex _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 7 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t | SUM | | | \d+ \(\d+\)') ) def _check_startup(child): for i in range(5): child.expect_exact('Creating thread #{}, next={}' .format(i, (i + 1) % 5)) def _check_help(child): child.sendline('') child.expect_exact('>') child.sendline('help') child.expect_exact('Command Description') child.expect_exact('---------------------------------------') child.expect_exact('ps Prints information about ' 'running threads.') child.expect_exact('reboot Reboot the node') def _check_ps(child): child.sendline('ps') for line in PS_EXPECTED: child.expect(line) # Wait for all lines of the ps output to be displayed child.expect_exact('>') def testfunc(child): _check_startup(child) _check_help(child) _check_ps(child) if __name__ == "__main__": sys.exit(run(testfunc))
#!/usr/bin/env python3 # Copyright (C) 2017 Inria # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. import sys from testrunner import run PS_EXPECTED = ( (r'\tpid | name | state Q | pri | stack \( used\) | ' r'base addr | current | runtime | switches'), (r'\t - | isr_stack | - - | - | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+'), (r'\t 1 | idle | pending Q | 15 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 2 | main | running Q | 7 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 3 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 4 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 5 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 6 | thread | bl mutex _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 7 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t | SUM | | | \d+ \(\d+\)') ) def _check_startup(child): for i in range(5): child.expect_exact('Creating thread #{}, next={}' .format(i, (i + 1) % 5)) def _check_help(child): child.sendline('') child.expect_exact('>') child.sendline('help') child.expect_exact('Command Description') child.expect_exact('---------------------------------------') child.expect_exact('reboot Reboot the node') child.expect_exact('ps Prints information about ' 'running threads.') def _check_ps(child): child.sendline('ps') for line in PS_EXPECTED: child.expect(line) # Wait for all lines of the ps output to be displayed child.expect_exact('>') def testfunc(child): _check_startup(child) _check_help(child) _check_ps(child) if __name__ == "__main__": sys.exit(run(testfunc))
Python
0.000001
9af7c8bfc22a250ce848d50ca26877e177f767c1
Fix execution on Monday
management.py
management.py
from logging import _nameToLevel as nameToLevel from argparse import ArgumentParser from Common.emailer import Emailer from DesksReminder.reminders import HelpDeskTechReminder, HelpDeskLabReminder, HelpDeskOtherReminder, \ UrgentDeskReminder, AccountsDeskReminder from HelpDesk.synchronization import AskbotSync, HelpDeskCaretaker from HelpDesk.stackoverflowsync import StackOverflowSync from urllib3 import disable_warnings from urllib3.exceptions import InsecureRequestWarning from datetime import datetime __author__ = 'Fernando López' __version__ = "1.3.0" def init(): parser = ArgumentParser(prog='Jira Management Scripts', description='') parser.add_argument('-l', '--log', default='INFO', help='The logging level to be used.') args = parser.parse_args() loglevel = None try: loglevel = nameToLevel[args.log.upper()] except Exception as e: print('Invalid log level: {}'.format(args.log)) print('Please use one of the following values:') print(' * CRITICAL') print(' * ERROR') print(' * WARNING') print(' * INFO') print(' * DEBUG') print(' * NOTSET') exit() return loglevel if __name__ == "__main__": loglevel = init() mailer = Emailer(loglevel=loglevel) disable_warnings(InsecureRequestWarning) today = datetime.today().weekday() if today == 0: # Send reminder of pending JIRA tickets, only every Mondays techReminder = HelpDeskTechReminder(loglevel=loglevel, mailer=mailer) techReminder.process() labReminder = HelpDeskLabReminder(loglevel=loglevel, mailer=mailer) labReminder.process() otherReminder = HelpDeskOtherReminder(loglevel=loglevel, mailer=mailer) otherReminder.process() urgentReminder = UrgentDeskReminder(loglevel=loglevel, mailer=mailer) urgentReminder.process() accountReminder = AccountsDeskReminder(loglevel=loglevel, mailer=mailer) accountReminder.process() # Askbot synchronization and Jira caretaker actions, every day askbotSync = AskbotSync(loglevel=loglevel) askbotSync.process() # Automatic reassign tickets to owners based on some extracted information, every day helpdeskCaretaker = HelpDeskCaretaker(loglevel=loglevel) helpdeskCaretaker.process() # StackoverFlow synchronization, every day stackoverflowSync = StackOverflowSync(loglevel=loglevel) stackoverflowSync.process(year=2015, month=9, day=21)
from logging import _nameToLevel as nameToLevel from argparse import ArgumentParser from Common.emailer import Emailer from DesksReminder.reminders import HelpDeskTechReminder, HelpDeskLabReminder, HelpDeskOtherReminder, \ UrgentDeskReminder, AccountsDeskReminder from HelpDesk.synchronization import AskbotSync, HelpDeskCaretaker from HelpDesk.stackoverflowsync import StackOverflowSync from urllib3 import disable_warnings from urllib3.exceptions import InsecureRequestWarning from datetime import datetime __author__ = 'Fernando López' __version__ = "1.3.0" def init(): parser = ArgumentParser(prog='Jira Management Scripts', description='') parser.add_argument('-l', '--log', default='INFO', help='The logging level to be used.') args = parser.parse_args() loglevel = None try: loglevel = nameToLevel[args.log.upper()] except Exception as e: print('Invalid log level: {}'.format(args.log)) print('Please use one of the following values:') print(' * CRITICAL') print(' * ERROR') print(' * WARNING') print(' * INFO') print(' * DEBUG') print(' * NOTSET') exit() return loglevel if __name__ == "__main__": loglevel = init() mailer = Emailer(loglevel=loglevel) disable_warnings(InsecureRequestWarning) today = datetime.today().weekday() if today == 2: # Send reminder of pending JIRA tickets, only every Mondays techReminder = HelpDeskTechReminder(loglevel=loglevel, mailer=mailer) techReminder.process() labReminder = HelpDeskLabReminder(loglevel=loglevel, mailer=mailer) labReminder.process() otherReminder = HelpDeskOtherReminder(loglevel=loglevel, mailer=mailer) otherReminder.process() urgentReminder = UrgentDeskReminder(loglevel=loglevel, mailer=mailer) urgentReminder.process() accountReminder = AccountsDeskReminder(loglevel=loglevel, mailer=mailer) accountReminder.process() # Askbot synchronization and Jira caretaker actions, every day askbotSync = AskbotSync(loglevel=loglevel) askbotSync.process() # Automatic reassign tickets to owners based on some extracted information, every day helpdeskCaretaker = HelpDeskCaretaker(loglevel=loglevel) helpdeskCaretaker.process() # StackoverFlow synchronization, every day stackoverflowSync = StackOverflowSync(loglevel=loglevel) stackoverflowSync.process(year=2015, month=9, day=21)
Python
0.000047
c67a32e731037143baf44841bc7e5a8b5e14473c
Add Reverse Method and Initialize an array from an list
LinkedList.py
LinkedList.py
class Node(object): def __init__(self, data=None, next_node=None): self.data = data self.next = next_node class LinkedList(object): #default constructor def __init__(self,array=None): self.head=None self.length=0 if(array!=None): self.initArray(array) #constructor with list as argument def initArray(self,array): for value in array: self.prepend(value) self.reverse() #method to copy a Linked List and to return the copy def copy(self): head2=LinkedList() temp=self.head while (temp!=None): head2.prepend(temp.data) temp=temp.next head2.reverse() return head2 def prepend(self, data): self.head=Node(data,self.head) self.length+=1 def append(self, data): temp=self.head parent=None while(temp!=None): parent=temp temp=temp.next temp=Node(data,None) if(parent==None): self.head=temp else: parent.next=temp self.length+=1 def InsertNth(self,data,position): temp=self.head index=0 parent=None while(index!=position): parent=temp temp=temp.next index+=1 temp=Node(data) if(parent==None): temp.next=self.head self.head=temp else: temp.next=parent.next parent.next=temp self.length+=1 def printLinkedList(self,sep=" "): if(self.length==0): return None temp=self.head while (temp.next!=None): print(str(temp.data),end=sep) temp=temp.next print(temp.data) def getData(self,position): if(self.length<=position): return None temp=self.head index=0 while(index!=position): temp=temp.next index+=1 return temp.data def remove(self,data): temp=self.head parent=None while (temp.data!=data and temp!=None): parent=temp temp=temp.next if(temp==None): return -1 parent.next=temp.next self.length-=1 return 0 def removeAt(self,position): if(self.length<=position): return -1 temp=self.head self.length-=1 index=0 if(position==0): self.head=self.head.next return 0 while(index!=position): parent=temp temp=temp.next index+=1 parent.next=temp.next return 0 def reverse(self): temp=self.head new=None while (temp!=None): next=temp.next temp.next=new new=temp temp=next self.head=new
class Node(object): def __init__(self, data=None, next_node=None): self.data = data self.next = next_node class LinkedList(object): def __init__(self): self.head=None self.length=0 def prepend(self, data): self.head=Node(data,self.head) self.length+=1 def append(self, data): temp=self.head parent=None while(temp!=None): parent=temp temp=temp.next temp=Node(data,None) if(parent==None): self.head=temp else: parent.next=temp self.length+=1 def InsertNth(self,data,position): temp=self.head index=0 parent=None while(index!=position): parent=temp temp=temp.next index+=1 temp=Node(data) if(parent==None): temp.next=self.head self.head=temp else: temp.next=parent.next parent.next=temp self.length+=1 def printLinkedList(self,sep=" "): if(self.length==0): return None temp=self.head while (temp.next!=None): print(str(temp.data),end=sep) temp=temp.next print(temp.data) def getData(self,position): if(self.length<=position): return None temp=self.head index=0 while(index!=position): temp=temp.next index+=1 return temp.data def remove(self,data): temp=self.head parent=None while (temp.data!=data and temp!=None): parent=temp temp=temp.next if(temp==None): return -1 parent.next=temp.next self.length-=1 return 0 def removeAt(self,position): if(self.length<=position): return -1 temp=self.head self.length-=1 index=0 if(position==0): self.head=self.head.next return 0 while(index!=position): parent=temp temp=temp.next index+=1 parent.next=temp.next return 0
Python
0.000001
ecd2821a99dee895f3ab7c5dbcc6d86983268560
Update src url for dev in views
__init__.py
__init__.py
from flask import Flask, request, redirect, url_for from twilio.rest import TwilioRestClient from PIL import Image, ImageDraw, ImageFont import time app = Flask(__name__, static_folder='static', static_url_path='') client = TwilioRestClient( account='ACb01b4d6edfb1b41a8b80f5fed2c19d1a', token='97e6b9c0074b2761eff1375fb088adda' ) @app.route('/', methods=['GET', 'POST']) def send_image(): if request.method == 'GET': return 'The deployment worked! Now copy your browser URL into the' + \ ' Twilio message text box for your phone number.' sender_number = request.form.get('From', '') twilio_number = request.form.get('To', '') user_text = request.form.get('Body', '') image_url, msg_text = mod_photo(user_text) send_mms_twiml(image_url, msg_text, sender_number, twilio_number) return 'ok' def mod_photo(user_text): base = Image.open('static/images/original/portland.jpg').convert('RGBA') txt = Image.new('RGBA', base.size, (255, 255, 255, 0)) fnt = ImageFont.truetype('static/fonts/Gobold.ttf', 30) d = ImageDraw.Draw(txt) d.text( (25, 25), '{}...'.format(user_text), font=fnt, fill=(255, 255, 255, 255) ) image = Image.alpha_composite(base, txt) image.save('static/images/changed/portland_{}.jpg'.format(user_text)) try: msg_text = '{}: Imagine yourself in Portland!'.format(user_text) image_url = 'http://dev.thevariable.com/images/changed/portland_{}.jpg'.format(user_text) except: msg = "Sorry, we couldn't pull a kitten, " + \ "here's a dinosaur instead!" image_url = "https://farm1.staticflickr.com/46/" + \ "154877897_a299d80baa_b_d.jpg" return image_url, msg_text def send_mms_twiml(image_url, msg_text, sender_number, twilio_number): client.messages.create( to=sender_number, from_=twilio_number, body=msg_text, media_url=image_url ) if __name__ == "__main__": app.run(debug=True)
from flask import Flask, request, redirect, url_for from twilio.rest import TwilioRestClient from PIL import Image, ImageDraw, ImageFont import time app = Flask(__name__, static_folder='static', static_url_path='') client = TwilioRestClient( account='ACb01b4d6edfb1b41a8b80f5fed2c19d1a', token='97e6b9c0074b2761eff1375fb088adda' ) @app.route('/', methods=['GET', 'POST']) def send_image(): if request.method == 'GET': return 'The deployment worked! Now copy your browser URL into the' + \ ' Twilio message text box for your phone number.' sender_number = request.form.get('From', '') twilio_number = request.form.get('To', '') user_text = request.form.get('Body', '') image_url, msg_text = mod_photo(user_text) send_mms_twiml(image_url, msg_text, sender_number, twilio_number) return 'ok' def mod_photo(user_text): base = Image.open('static/images/original/portland.jpg').convert('RGBA') txt = Image.new('RGBA', base.size, (255, 255, 255, 0)) fnt = ImageFont.truetype('static/fonts/Gobold.ttf', 30) d = ImageDraw.Draw(txt) d.text( (25, 25), '{}...'.format(user_text), font=fnt, fill=(255, 255, 255, 255) ) image = Image.alpha_composite(base, txt) image.save('static/images/changed/portland_{}.jpg'.format(user_text)) try: msg_text = '{}: Imagine yourself in Portland!'.format(user_text) image_url = 'http://12dcb913.ngrok.com/images/changed/portland_{}.jpg'.format(user_text) except: msg = "Sorry, we couldn't pull a kitten, " + \ "here's a dinosaur instead!" image_url = "https://farm1.staticflickr.com/46/" + \ "154877897_a299d80baa_b_d.jpg" return image_url, msg_text def send_mms_twiml(image_url, msg_text, sender_number, twilio_number): client.messages.create( to=sender_number, from_=twilio_number, body=msg_text, media_url=image_url ) if __name__ == "__main__": app.run(debug=True)
Python
0
598bb39414825ff8ab561babb470b85f06c58020
Update __init__.py
__init__.py
__init__.py
from mlpack.linear_regression import linear_regression from mlpack.logistic_regression import logistic_regression """ MlPack ====== Provides 1. A Variety of Machine learning packages 2. Good and Easy hand written programs with good documentation 3. Linear Regression, Logistic Regression Available subpackages --------------------- 1. Linear Regression 2. Logistic Regression See subpackages for more details. """
from mlpack import linear_regression from mlpack import logistic_regression """ MlPack ====== Provides 1. A Variety of Machine learning packages 2. Good and Easy hand written programs with good documentation 3. Linear Regression, Logistic Regression Available subpackages --------------------- 1. Linear Regression 2. Logistic Regression See subpackages for more details. """
Python
0
7875c6b4848e7c30a6d5a53c2b3c01d7aba5fa65
improve voting results test
scraper/test.py
scraper/test.py
from django.test import TestCase import scraper.documents import scraper.votings # metadata = scraper.documents.get_metadata(document_id='kst-33885-7') # print(metadata) # page_url = 'https://zoek.officielebekendmakingen.nl/kst-33885-7.html?zoekcriteria=%3fzkt%3dEenvoudig%26pst%3d%26vrt%3d33885%26zkd%3dInDeGeheleText%26dpr%3dAfgelopenDag%26spd%3d20160522%26epd%3d20160523%26sdt%3dDatumBrief%26ap%3d%26pnr%3d1%26rpp%3d10%26_page%3d4%26sorttype%3d1%26sortorder%3d4&resultIndex=34&sorttype=1&sortorder=4' # scraper.documents.get_document_id(page_url) # scraper.documents.search_politieknl_dossier(33885) class TestExample(TestCase): """ Example test case """ dossier_nr = 33885 def test_get_voting_pages_for_dossier(self): """ Example test """ expected_urls = [ 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154', 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153' ] votings_urls = scraper.votings.get_voting_pages_for_dossier(self.dossier_nr) self.assertEqual(len(expected_urls), len(votings_urls)) for i in range(len(votings_urls)): self.assertEqual(votings_urls[i], expected_urls[i]) def test_get_votings_for_page(self): voting_page_urls = [ 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154', 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153' ] expected_results = [ {'result': 'Verworpen', 'document_id': '33885-17'}, {'result': 'Aangenomen', 'document_id': '33885-30'}, {'result': 'Verworpen', 'document_id': '33885-19'}, {'result': 'Verworpen', 'document_id': '33885-20'}, {'result': 'Eerder ingetrokken (tijdens debat)', 'document_id': '33885-21'}, {'result': 'Verworpen', 'document_id': '33885-31'}, {'result': 'Aangehouden (tijdens debat)', 'document_id': '33885-23'}, {'result': 'Verworpen', 'document_id': '33885-24'}, {'result': 'Aangehouden (tijdens debat)', 'document_id': '33885-25'}, {'result': 'Verworpen', 'document_id': '33885-26'}, {'result': 'Verworpen', 'document_id': '33885-27'}, {'result': 'Eerder ingetrokken (tijdens debat)', 'document_id': '33885-28'}, {'result': 'Ingetrokken', 'document_id': '33885-14'}, {'result': 'Verworpen', 'document_id': '33885-15'}, {'result': 'Verworpen', 'document_id': '33885-16'}, {'result': 'Verworpen', 'document_id': '33885-10'}, {'result': 'Verworpen', 'document_id': '33885-13'}, {'result': 'Aangenomen', 'document_id': '33885'} ] results = [] for url in voting_page_urls: results += scraper.votings.get_votings_for_page(url) self.assertEqual(len(results), len(expected_results)) for i in range(len(results)): print(results[i]) self.assertEqual(results[i], expected_results[i])
from django.test import TestCase import scraper.documents import scraper.votings # metadata = scraper.documents.get_metadata(document_id='kst-33885-7') # print(metadata) # page_url = 'https://zoek.officielebekendmakingen.nl/kst-33885-7.html?zoekcriteria=%3fzkt%3dEenvoudig%26pst%3d%26vrt%3d33885%26zkd%3dInDeGeheleText%26dpr%3dAfgelopenDag%26spd%3d20160522%26epd%3d20160523%26sdt%3dDatumBrief%26ap%3d%26pnr%3d1%26rpp%3d10%26_page%3d4%26sorttype%3d1%26sortorder%3d4&resultIndex=34&sorttype=1&sortorder=4' # scraper.documents.get_document_id(page_url) # scraper.documents.search_politieknl_dossier(33885) class TestExample(TestCase): """ Example test case """ dossier_nr = 33885 def test_get_voting_pages_for_dossier(self): """ Example test """ expected_urls = [ 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154', 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153' ] votings_urls = scraper.votings.get_voting_pages_for_dossier(self.dossier_nr) self.assertEqual(len(expected_urls), len(votings_urls)) for i in range(len(votings_urls)): self.assertEqual(votings_urls[i], expected_urls[i]) def test_get_votings_for_page(self): voting_page_urls = [ 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154', 'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153' ] for url in voting_page_urls: votings = scraper.votings.get_votings_for_page(url) for voting in votings: print(voting)
Python
0.000003
b8d0344f0ca5c906e43d4071bc27a8d2acf114d1
bump version
webmpris/__init__.py
webmpris/__init__.py
__version__ = '1.1' __description__ = 'REST API to control media players via MPRIS2 interfaces' requires = [ 'pympris' ] README = """webmpris is a REST API to control media players via MPRIS2 interfaces. Supported intefaces: org.mpris.MediaPlayer2 via /players/<id>/Root org.mpris.MediaPlayer2.Player via /players/<id>/Player org.mpris.MediaPlayer2.TrackList via /players/<id>/TrackList org.mpris.MediaPlayer2.Playlists via /players/<id>/Playlists """
__version__ = '1.0' __description__ = 'REST API to control media players via MPRIS2 interfaces' requires = [ 'pympris' ] README = """webmpris is a REST API to control media players via MPRIS2 interfaces. Supported intefaces: org.mpris.MediaPlayer2 via /players/<id>/Root org.mpris.MediaPlayer2.Player via /players/<id>/Player org.mpris.MediaPlayer2.TrackList via /players/<id>/TrackList org.mpris.MediaPlayer2.Playlists via /players/<id>/Playlists """
Python
0
9acf7857167bb87438c7c0bebca1a7eda93ac23b
Make saml2idp compatible with Django 1.9
saml2idp/registry.py
saml2idp/registry.py
# -*- coding: utf-8 -*- from __future__ import absolute_import """ Registers and loads Processor classes from settings. """ import logging from importlib import import_module from django.core.exceptions import ImproperlyConfigured from . import exceptions from . import saml2idp_metadata logger = logging.getLogger(__name__) def get_processor(config): """ Get an instance of the processor with config. """ dottedpath = config['processor'] try: dot = dottedpath.rindex('.') except ValueError: raise ImproperlyConfigured('%s isn\'t a processors module' % dottedpath) sp_module, sp_classname = dottedpath[:dot], dottedpath[dot+1:] try: mod = import_module(sp_module) except ImportError, e: raise ImproperlyConfigured('Error importing processors %s: "%s"' % (sp_module, e)) try: sp_class = getattr(mod, sp_classname) except AttributeError: raise ImproperlyConfigured('processors module "%s" does not define a "%s" class' % (sp_module, sp_classname)) instance = sp_class(config) return instance def find_processor(request): """ Returns the Processor instance that is willing to handle this request. """ for name, sp_config in saml2idp_metadata.SAML2IDP_REMOTES.items(): proc = get_processor(sp_config) try: if proc.can_handle(request): return proc except exceptions.CannotHandleAssertion as exc: # Log these, but keep looking. logger.debug('%s %s' % (proc, exc)) raise exceptions.CannotHandleAssertion('None of the processors in SAML2IDP_REMOTES could handle this request.')
# -*- coding: utf-8 -*- from __future__ import absolute_import """ Registers and loads Processor classes from settings. """ # Python imports import logging # Django imports from django.utils.importlib import import_module from django.core.exceptions import ImproperlyConfigured # Local imports from . import exceptions from . import saml2idp_metadata # Setup logging logger = logging.getLogger(__name__) def get_processor(config): """ Get an instance of the processor with config. """ dottedpath = config['processor'] try: dot = dottedpath.rindex('.') except ValueError: raise ImproperlyConfigured('%s isn\'t a processors module' % dottedpath) sp_module, sp_classname = dottedpath[:dot], dottedpath[dot+1:] try: mod = import_module(sp_module) except ImportError, e: raise ImproperlyConfigured('Error importing processors %s: "%s"' % (sp_module, e)) try: sp_class = getattr(mod, sp_classname) except AttributeError: raise ImproperlyConfigured('processors module "%s" does not define a "%s" class' % (sp_module, sp_classname)) instance = sp_class(config) return instance def find_processor(request): """ Returns the Processor instance that is willing to handle this request. """ for name, sp_config in saml2idp_metadata.SAML2IDP_REMOTES.items(): proc = get_processor(sp_config) try: if proc.can_handle(request): return proc except exceptions.CannotHandleAssertion as exc: # Log these, but keep looking. logger.debug('%s %s' % (proc, exc)) raise exceptions.CannotHandleAssertion('None of the processors in SAML2IDP_REMOTES could handle this request.')
Python
0
b8cd1b6869651cd0cbe2cbeebc59c641f13e0e5b
Add todo for scopes permissions
polyaxon/scopes/permissions/scopes.py
polyaxon/scopes/permissions/scopes.py
from scopes.authentication.ephemeral import is_ephemeral_user from scopes.authentication.internal import is_internal_user from scopes.permissions.base import PolyaxonPermission class ScopesPermission(PolyaxonPermission): """ Scopes based Permissions, depends on the authentication backend. """ ENTITY = None SCOPE_MAPPING = None @staticmethod def _check_internal_or_ephemeral(request): return any([is_ephemeral_user(request.user), is_internal_user(request.user)]) def has_permission(self, request, view): if not request.auth: if not request.user.is_authenticated: return False # Session users are granted total access return True # TODO Add internal/ephemeral here # (if that type of auth is allowed, then we should not check he scope) if request.user.is_authenticated and request.user.is_superuser: return True allowed_scopes = set(self.SCOPE_MAPPING.get(request.method, [])) if not allowed_scopes: return True current_scopes = request.auth.scopes return any(s in allowed_scopes for s in current_scopes)
from scopes.authentication.ephemeral import is_ephemeral_user from scopes.authentication.internal import is_internal_user from scopes.permissions.base import PolyaxonPermission class ScopesPermission(PolyaxonPermission): """ Scopes based Permissions, depends on the authentication backend. """ ENTITY = None SCOPE_MAPPING = None @staticmethod def _check_internal_or_ephemeral(request): return any([is_ephemeral_user(request.user), is_internal_user(request.user)]) def has_permission(self, request, view): if not request.auth: if not request.user.is_authenticated: return False # Session users are granted total access return True if request.user.is_authenticated and request.user.is_superuser: return True allowed_scopes = set(self.SCOPE_MAPPING.get(request.method, [])) if not allowed_scopes: return True current_scopes = request.auth.scopes return any(s in allowed_scopes for s in current_scopes)
Python
0
ebacfc3ffe1cd1c9c58908c1f9dd78fe9eca9acd
fix for lambton not needed
ca_on_lambton/people.py
ca_on_lambton/people.py
from pupa.scrape import Scraper from utils import lxmlize, CanadianLegislator as Legislator import re COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx' class LambtonPersonScraper(Scraper): def get_people(self): page = lxmlize(COUNCIL_PAGE) councillors = page.xpath('//div[@id="WebPartWPQ1"]/table/tbody/tr[1]') for councillor in councillors: node = councillor.xpath('.//td[1]//strong//strong//strong//strong') or councillor.xpath('.//td[1]//strong') text = node[0].text_content() name = text.strip().replace('Deputy ', '').replace('Warden ', '').replace('Mayor', '') role = text.replace(name, '').strip() if not role: role = 'Councillor' if ',' in name: name = name.split(',')[0].strip() district = councillor.xpath('.//td[1]//p[contains(text(),",")]/text()')[0].split(',')[1].strip() district = re.sub(r'\A(?:City|Municipality|Town|Township|Village) of\b| Township\Z', '', district) p = Legislator(name=name, post_id=district, role=role) p.add_source(COUNCIL_PAGE) p.image = councillor.xpath('.//td[1]//img/@src')[0] info = councillor.xpath('.//td[2]')[0].text_content() residential_info = re.findall(r'(?<=Residence:)(.*)(?=Municipal Office:)', info, flags=re.DOTALL)[0] self.get_contacts(residential_info, 'residence', p) municipal_info = re.findall(r'(?<=Municipal Office:)(.*)', info, flags=re.DOTALL)[0] self.get_contacts(municipal_info, 'legislature', p) yield p def get_contacts(self, text, note, councillor): address = text.split('Telephone')[0] text = text.replace(address, '').split(':') for i, contact in enumerate(text): if i == 0: continue contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext') if '@' in contact: contact = contact.strip() else: contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-') if 'Fax' in contact_type: councillor.add_contact('fax', contact, note) elif 'Tel' in contact_type: councillor.add_contact('voice', contact, note) elif 'email' in contact_type: councillor.add_contact('email', contact, None) else: councillor.add_contact(contact_type, contact, note)
from pupa.scrape import Scraper from utils import lxmlize, CanadianLegislator as Legislator import re COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx' SGC = { 'St. Clair' : '3538003', 'Dawn-Euphemia' : '3538007', 'Brooke-Alvinston' : '3538015', 'Enniskillen' : '3538016', 'Oil Springs' : '3538018', 'Petrolia' : '3538019', 'Sarnia' : '3538030', 'Point Edward' : '3538031', 'Plympton-Wyoming' : '3538035', 'Lambton Shores' : '3538040', 'Warwick' : '3538043', } class LambtonPersonScraper(Scraper): def get_people(self): page = lxmlize(COUNCIL_PAGE) councillors = page.xpath('//div[@id="WebPartWPQ1"]/table/tbody/tr[1]') for councillor in councillors: node = councillor.xpath('.//td[1]//strong//strong//strong//strong') or councillor.xpath('.//td[1]//strong') text = node[0].text_content() name = text.strip().replace('Deputy ', '').replace('Warden ', '').replace('Mayor', '') role = text.replace(name, '').strip() if not role: role = 'Councillor' if ',' in name: name = name.split(',')[0].strip() district = councillor.xpath('.//td[1]//p[contains(text(),",")]/text()')[0].split(',')[1].strip() district = re.sub(r'\A(?:City|Municipality|Town|Township|Village) of\b| Township\Z', '', district) p = Legislator(name=name, post_id=district, role=role) p.add_source(COUNCIL_PAGE) p.image = councillor.xpath('.//td[1]//img/@src')[0] info = councillor.xpath('.//td[2]')[0].text_content() residential_info = re.findall(r'(?<=Residence:)(.*)(?=Municipal Office:)', info, flags=re.DOTALL)[0] self.get_contacts(residential_info, 'residence', p) municipal_info = re.findall(r'(?<=Municipal Office:)(.*)', info, flags=re.DOTALL)[0] self.get_contacts(municipal_info, 'legislature', p) # Needed for Represent integration. p.add_extra('sgc', SGC[district.strip()]) yield p def get_contacts(self, text, note, councillor): address = text.split('Telephone')[0] text = text.replace(address, '').split(':') for i, contact in enumerate(text): if i == 0: continue contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext') if '@' in contact: contact = contact.strip() else: contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-') if 'Fax' in contact_type: councillor.add_contact('fax', contact, note) elif 'Tel' in contact_type: councillor.add_contact('voice', contact, note) elif 'email' in contact_type: councillor.add_contact('email', contact, None) else: councillor.add_contact(contact_type, contact, note)
Python
0
c202a3a945453a4955f0acbf369227f8c9cee148
Rename link in init
__init__.py
__init__.py
import os from .batchflow import * __path__ = [os.path.join(os.path.dirname(__file__), 'batchflow')]
import os from .dataset import * __path__ = [os.path.join(os.path.dirname(__file__), 'dataset')]
Python
0
4a4731eda22170a77bb24dd3c7fc8ff4cafecf9d
bump version to 2.7b1
__init__.py
__init__.py
"""distutils The main package for the Python Module Distribution Utilities. Normally used from a setup script as from distutils.core import setup setup (...) """ __revision__ = "$Id$" # Distutils version # # Updated automatically by the Python release process. # #--start constants-- __version__ = "2.7b1" #--end constants--
"""distutils The main package for the Python Module Distribution Utilities. Normally used from a setup script as from distutils.core import setup setup (...) """ __revision__ = "$Id$" # Distutils version # # Updated automatically by the Python release process. # #--start constants-- __version__ = "2.7a4" #--end constants--
Python
0
bc43827ee733af9c37ca3b97b471ec1d2cde294b
Add unsubcribed handler to server.
echidna/server.py
echidna/server.py
import json from cyclone.web import Application, RequestHandler, HTTPError from cyclone.websocket import WebSocketHandler from echidna.cards.memory_store import InMemoryCardStore class EchidnaServer(Application): def __init__(self, root, **settings): self.store = InMemoryCardStore() handlers = [ (r"/", root), (r"/publish/(?P<channel>.*)/", PublicationHandler, dict(store=self.store)), (r"/subscribe", SubscriptionHandler, dict(store=self.store)), ] Application.__init__(self, handlers, **settings) class PublicationHandler(RequestHandler): def initialize(self, store): self.store = store def post(self, channel): try: channel = self.decode_argument(channel, "channel") except: raise HTTPError(400, "Invalid value for channel.") try: card = json.loads(self.request.body) except: raise HTTPError(400, "Invalid card in request body.") self.store.publish(channel, card) self.set_header("Content-Type", "application/json") self.write(json.dumps({"success": True})) class SubscriptionHandler(WebSocketHandler): def initialize(self, store): self.store = store self.client = None def _set_client(self, client): self.client = client def connectionMade(self, *args, **kw): d = self.store.create_client(self.on_publish) return d.addCallback(self._set_client) def connectionLost(self, reason): if self.client is not None: return self.store.remove_client(self.client) def messageReceived(self, msg): try: msg = json.loads(msg) except: return if not isinstance(msg, dict): return msg_type = msg.get("msg_type", "invalid") if not isinstance(msg_type, unicode): return handler = getattr(self, "handle_" + msg_type, self.handle_invalid) handler(msg) def on_publish(self, channel_name, card): return self.send_card(channel_name, card) def send_card(self, channel_name, card): msg = { "msg_type": "card", "channel": channel_name, "card": card, } self.sendMessage(json.dumps(msg)) def send_error(self, reason, **data): msg = { "msg_type": "error", "reason": reason, } msg.update(data) self.sendMessage(json.dumps(msg)) def send_cards(self, channel_name, cards): for card in cards: self.on_publish(channel_name, card) def handle_subscribe(self, msg): channel_name = msg.get("channel") if not isinstance(channel_name, unicode): return d = self.store.subscribe(channel_name, self.client) return d.addCallback( lambda cards: self.send_cards(channel_name, cards)) def handle_unsubscribed(self, msg): channel_name = msg.get("channel") if not isinstance(channel_name, unicode): return d = self.store.unsubscribe(channel_name, self.client) return d def handle_invalid(self, msg): self.send_error("invalid message", original_message=msg)
import json from cyclone.web import Application, RequestHandler, HTTPError from cyclone.websocket import WebSocketHandler from echidna.cards.memory_store import InMemoryCardStore class EchidnaServer(Application): def __init__(self, root, **settings): self.store = InMemoryCardStore() handlers = [ (r"/", root), (r"/publish/(?P<channel>.*)/", PublicationHandler, dict(store=self.store)), (r"/subscribe", SubscriptionHandler, dict(store=self.store)), ] Application.__init__(self, handlers, **settings) class PublicationHandler(RequestHandler): def initialize(self, store): self.store = store def post(self, channel): try: channel = self.decode_argument(channel, "channel") except: raise HTTPError(400, "Invalid value for channel.") try: card = json.loads(self.request.body) except: raise HTTPError(400, "Invalid card in request body.") self.store.publish(channel, card) self.set_header("Content-Type", "application/json") self.write(json.dumps({"success": True})) class SubscriptionHandler(WebSocketHandler): def initialize(self, store): self.store = store self.client = None def _set_client(self, client): self.client = client def connectionMade(self, *args, **kw): d = self.store.create_client(self.on_publish) return d.addCallback(self._set_client) def connectionLost(self, reason): if self.client is not None: return self.store.remove_client(self.client) def messageReceived(self, msg): try: msg = json.loads(msg) except: return if not isinstance(msg, dict): return msg_type = msg.get("msg_type", "invalid") if not isinstance(msg_type, unicode): return handler = getattr(self, "handle_" + msg_type, self.handle_invalid) handler(msg) def on_publish(self, channel_name, card): return self.send_card(channel_name, card) def send_card(self, channel_name, card): msg = { "msg_type": "card", "channel": channel_name, "card": card, } self.sendMessage(json.dumps(msg)) def send_error(self, reason, **data): msg = { "msg_type": "error", "reason": reason, } msg.update(data) self.sendMessage(json.dumps(msg)) def send_cards(self, channel_name, cards): for card in cards: self.on_publish(channel_name, card) def handle_subscribe(self, msg): channel_name = msg.get("channel") if not isinstance(channel_name, unicode): return d = self.store.subscribe(channel_name, self.client) return d.addCallback( lambda cards: self.send_cards(channel_name, cards)) def handle_invalid(self, msg): self.send_error("invalid message", original_message=msg)
Python
0
86eb16da4a6c3579eb514fa5ca73def7be8afd84
Add noqa codestyle
geotrek/api/v2/views/__init__.py
geotrek/api/v2/views/__init__.py
from rest_framework import response, permissions from rest_framework.views import APIView from django.conf import settings from django.contrib.gis.geos import Polygon from .authent import StructureViewSet # noqa from .common import TargetPortalViewSet, ThemeViewSet, SourceViewSet, ReservationSystemViewSet, LabelViewSet, OrganismViewSet # noqa if 'geotrek.core' in settings.INSTALLED_APPS: from .core import PathViewSet # noqa if 'geotrek.feedback' in settings.INSTALLED_APPS: from .feedback import ReportStatusViewSet, ReportActivityViewSet, ReportCategoryViewSet, ReportProblemMagnitudeViewSet # noqa if 'geotrek.trekking' in settings.INSTALLED_APPS: from .trekking import (TrekViewSet, TourViewSet, POIViewSet, POITypeViewSet, AccessibilityViewSet, RouteViewSet, # noqa DifficultyViewSet, NetworkViewSet, PracticeViewSet, # noqa WebLinkCategoryViewSet, ServiceTypeViewSet, ServiceViewSet, TrekRatingScaleViewSet, TrekRatingViewSet) # noqa if 'geotrek.sensitivity' in settings.INSTALLED_APPS: from .sensitivity import SensitiveAreaViewSet # noqa from .sensitivity import SportPracticeViewSet # noqa from .sensitivity import SpeciesViewSet # noqa if 'geotrek.tourism' in settings.INSTALLED_APPS: from .tourism import TouristicContentViewSet, TouristicEventViewSet, TouristicEventTypeViewSet, InformationDeskViewSet, TouristicContentCategoryViewSet # noqa if 'geotrek.zoning' in settings.INSTALLED_APPS: from .zoning import CityViewSet, DistrictViewSet # noqa if 'geotrek.outdoor' in settings.INSTALLED_APPS: from .outdoor import (SiteViewSet, OutdoorPracticeViewSet, SiteTypeViewSet, CourseTypeViewSet, # noqa OutdoorRatingScaleViewSet, OutdoorRatingViewSet, CourseViewSet, SectorViewSet) # noqa if 'geotrek.flatpages' in settings.INSTALLED_APPS: from .flatpages import FlatPageViewSet # noqa if 'geotrek.infrastructure' in settings.INSTALLED_APPS: from .infrastructure import InfrastructureTypeViewSet, InfrastructureViewSet, InfrastructureUsageDifficultyLevelViewSet, InfrastructureConditionViewSet, InfrastructureMaintenanceDifficultyLevelViewSet # noqa if 'geotrek.signage' in settings.INSTALLED_APPS: from .signage import SignageViewSet, SignageTypeViewSet, SealingViewSet, ColorViewSet, DirectionViewSet, BladeTypeViewSet # noqa if 'drf_yasg' in settings.INSTALLED_APPS: from .swagger import schema_view # noqa class ConfigView(APIView): """ Configuration endpoint that gives the BBox used in the Geotrek configuration """ permission_classes = [permissions.AllowAny, ] def get(self, request, *args, **kwargs): bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT) bbox.srid = settings.SRID bbox.transform(settings.API_SRID) return response.Response({ 'bbox': bbox.extent })
from rest_framework import response, permissions from rest_framework.views import APIView from django.conf import settings from django.contrib.gis.geos import Polygon from .authent import StructureViewSet # noqa from .common import TargetPortalViewSet, ThemeViewSet, SourceViewSet, ReservationSystemViewSet, LabelViewSet, OrganismViewSet # noqa if 'geotrek.core' in settings.INSTALLED_APPS: from .core import PathViewSet # noqa if 'geotrek.feedback' in settings.INSTALLED_APPS: from .feedback import ReportStatusViewSet, ReportActivityViewSet, ReportCategoryViewSet, ReportProblemMagnitudeViewSet # noqa if 'geotrek.trekking' in settings.INSTALLED_APPS: from .trekking import (TrekViewSet, TourViewSet, POIViewSet, POITypeViewSet, AccessibilityViewSet, RouteViewSet, DifficultyViewSet, NetworkViewSet, PracticeViewSet, WebLinkCategoryViewSet, ServiceTypeViewSet, ServiceViewSet, TrekRatingScaleViewSet, TrekRatingViewSet) # noqa if 'geotrek.sensitivity' in settings.INSTALLED_APPS: from .sensitivity import SensitiveAreaViewSet # noqa from .sensitivity import SportPracticeViewSet # noqa from .sensitivity import SpeciesViewSet # noqa if 'geotrek.tourism' in settings.INSTALLED_APPS: from .tourism import TouristicContentViewSet, TouristicEventViewSet, TouristicEventTypeViewSet, InformationDeskViewSet, TouristicContentCategoryViewSet # noqa if 'geotrek.zoning' in settings.INSTALLED_APPS: from .zoning import CityViewSet, DistrictViewSet # noqa if 'geotrek.outdoor' in settings.INSTALLED_APPS: from .outdoor import (SiteViewSet, OutdoorPracticeViewSet, SiteTypeViewSet, CourseTypeViewSet, OutdoorRatingScaleViewSet, OutdoorRatingViewSet, CourseViewSet, SectorViewSet) # noqa if 'geotrek.flatpages' in settings.INSTALLED_APPS: from .flatpages import FlatPageViewSet # noqa if 'geotrek.infrastructure' in settings.INSTALLED_APPS: from .infrastructure import InfrastructureTypeViewSet, InfrastructureViewSet, InfrastructureUsageDifficultyLevelViewSet, InfrastructureConditionViewSet, InfrastructureMaintenanceDifficultyLevelViewSet # noqa if 'geotrek.signage' in settings.INSTALLED_APPS: from .signage import SignageViewSet, SignageTypeViewSet, SealingViewSet, ColorViewSet, DirectionViewSet, BladeTypeViewSet # noqa if 'drf_yasg' in settings.INSTALLED_APPS: from .swagger import schema_view # noqa class ConfigView(APIView): """ Configuration endpoint that gives the BBox used in the Geotrek configuration """ permission_classes = [permissions.AllowAny, ] def get(self, request, *args, **kwargs): bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT) bbox.srid = settings.SRID bbox.transform(settings.API_SRID) return response.Response({ 'bbox': bbox.extent })
Python
0
dded8beb4a075dfc44938d5355727cc4058ba80b
Fix typo
athenet/data_loader/data_loader_buffer.py
athenet/data_loader/data_loader_buffer.py
"""Buffer for storing large network data.""" import numpy as np import theano class Buffer(object): """Buffer storing data from contiguous subsequence of minibatches. Content of a buffer is a 4-dimensional floating-point tensor. """ def __init__(self, data_loader=None): """Create data Buffer. :data_loader: Instance of DataLoader that will be using Buffer. """ self.begin = -1 self.end = 0 self.offset = theano.shared(0) self.parent = data_loader # Create a 4-dimensinal tensor shared variable for data. Exact size of # the tensor is determined when data is set, and can change over time. self._data = theano.shared( np.zeros((1, 1, 1, 1), dtype=theano.config.floatX), borrow=True) @property def data(self): """Shared variable representing data stored in a buffer.""" return self._data def __getitem__(self, key): """Return minibatches of given indices. Return data is taken from data array, however key represents minibatch index, not direct index in data array. Effectively, buffer can be used as if it contained all of the minibatches data. Parent must be set before using this method, as minibatch size is needed to determine shift that has to be used in data array. :key: Symbolic index or slice representing indices of minibatches to return. :return: Minibatches data. """ shift = self.offset * self.parent.batch_size if isinstance(key, slice): start, stop, step = key.start, key.stop, key.step return self._data[start-shift:stop-shift:step] else: return self._data[key-shift] def set(self, data, batch_index=None, n_of_batches=None): """Set buffer data. :data: Data to be stored in a buffer. :batch_index: Index of first minibatch that is contained in given data. :n_of_batches: Number of minibatches that are contained in given data. """ if batch_index: self.begin = batch_index self.offset.set_value(batch_index) if n_of_batches: self.end = batch_index + n_of_batches self._data.set_value( np.asarray(np.concatenate(data, axis=0), dtype=theano.config.floatX), borrow=True) def contains(self, batch_index): """Check if minibatch is contained in a buffer. :batch_index: Index of a minibatch. :return: True, if minibatch of a given index is contained in a buffer. False otherwise. """ return batch_index >= self.begin and batch_index < self.end
"""Buffer for storing large network data.""" import numpy as np import theano class Buffer(object): """Buffer storing data from contiguous subsequence of minibatches. Content of a buffer is a 4-dimensional floating-point tensor. """ def __init__(self, data_loader=None): """Create data Buffer. :data_loader: Instance of DataLoader that will be using Buffer. """ self.begin = -1 self.end = 0 self.offset = theano.shared(0) self.parent = data_loader # Create a 4-dimensinal tensor shared variable for data. Exact size of # the tensor is determined when data is set, and can change over time. self._data = theano.shared( np.zeros((1, 1, 1, 1), dtype=theano.config.floatX), borrow=True) @property def data(self): """Shared variable representing data stored in a buffer.""" return self._data def __getitem__(self, key): """Return minibatches of given indices. Return data is taken from data array, however key represents minibatch index, not direct index in data array. Effectively, buffer can be used as if it contained all of the minibatches data. Parent must be set before using this method, as minibatch size is needed to determine shift that has to be uses in data array. :key: Symbolic index or slice representing indices of minibatches to return. :return: Minibatches data. """ shift = self.offset * self.parent.batch_size if isinstance(key, slice): start, stop, step = key.start, key.stop, key.step return self._data[start-shift:stop-shift:step] else: return self._data[key-shift] def set(self, data, batch_index=None, n_of_batches=None): """Set buffer data. :data: Data to be stored in a buffer. :batch_index: Index of first minibatch that is contained in given data. :n_of_batches: Number of minibatches that are contained in given data. """ if batch_index: self.begin = batch_index self.offset.set_value(batch_index) if n_of_batches: self.end = batch_index + n_of_batches self._data.set_value( np.asarray(np.concatenate(data, axis=0), dtype=theano.config.floatX), borrow=True) def contains(self, batch_index): """Check if minibatch is contained in a buffer. :batch_index: Index of a minibatch. :return: True, if minibatch of a given index is contained in a buffer. False otherwise. """ return batch_index >= self.begin and batch_index < self.end
Python
0.999999
a962f1e0aced277e673eddc6b70e316bba482f24
fix typo
api/mail.py
api/mail.py
from flask import Flask, render_template from api import app from api.models import User, Invites, Reset from flask_mail import Mail from flask_mail import Message app.config.update( MAIL_SERVER = 'smtp.yandex.com', MAIL_PORT = 465, MAIL_USE_SSL = True , MAIL_USERNAME = 'cross-apps@yandex.com', MAIL_PASSWORD = 'innovativeproject', ) mail = Mail(app) def send_email(subject, sender, recipients, html_body): """ Sends email of given subject, sender, recipents (array) and html template. """ msg = Message(subject=subject, sender=sender, recipients=recipients) msg.html = html_body mail.send(msg) def send_email_register(sender,recip): """ User invitation email. """ email = recip[0] username = email.split('@')[0] admin = sender.split('@')[0] new = Invites.query.filter_by(email = email).first() url = 'https://cross-app-links.herokuapp.com/api/auth/setpassword?token=' + str(new.token) subject = "Cross-apps registration" headerText = "You've received an invitation!" freeText = "Administrator has invited you to join Cross-apps shortcuts!" userTextBold = "You can complete your registartion by clicking the button or entering the link. \n Set up your unique password and make yourself home!" userText = "" send_email(subject, 'cross-apps@yandex.com', recip, render_template("email_template.html", user=username, sender=admin, url=url, subject=subject, buttonText="Register", headerText=headerText, freeText=freeText, userTextBold=userTextBold, userText=userText)) def send_email_reset(email): """ User password reset email. """ recipent = email[0] username = recipent.split('@')[0] new = Reset.query.filter_by(email = recipent).first() url = 'https://cross-app-links.herokuapp.com/api/auth/setnewpassword?token=' + str(new.token) subject = "Cross-apps password reset" headerText = "Looks like you want to reset your password!" freeText = "Here we send you instructions to set up a new password for your account!" userTextBold = "Please proceed by clicking the button. \n You will be displayed a page that will allow you to set a new password." userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging." send_email(subject, 'cross-apps@yandex.com', email, render_template("email_template.html", user=username, sender="system", url=url, subject=subject, buttonText="RESET", headerText=headerText, freeText=freeText, userTextBold=userTextBold, userText=userText))
from flask import Flask, render_template from api import app from api.models import User, Invites, Reset from flask_mail import Mail from flask_mail import Message app.config.update( MAIL_SERVER = 'smtp.yandex.com', MAIL_PORT = 465, MAIL_USE_SSL = True , MAIL_USERNAME = 'cross-apps@yandex.com', MAIL_PASSWORD = 'innovativeproject', ) mail = Mail(app) def send_email(subject, sender, recipients, html_body): """ Sends email of given subject, sender, recipents (array) and html template. """ msg = Message(subject=subject, sender=sender, recipients=recipients) msg.html = html_body mail.send(msg) def send_email_register(sender,recip): """ User invitation email. """ email = recip[0] username = email.split('@')[0] admin = sender.split('@')[0] new = Invites.query.filter_by(email = email).first() url = 'https://cross-app-links.herokuapp.com/api/auth/setpassword?token=' + str(new.token) subject = "Cross-apps registration" headerText = "You've received an invitation!" freeText = "Administrator has invited you to join Cross-apps shortcuts!" userTextBold = "You can complete your registartion by clicking the button or entering the link. \n Set up your unique password and make yourself home!" userText = "" send_email(subject, 'cross-apps@yandex.com', recip, render_template("email_reset_template.html", user=username, sender=admin, url=url, subject=subject, buttonText="Register", headerText=headerText, freeText=freeText, userTextBold=userTextBold, userText=userText)) def send_email_reset(email): """ User password reset email. """ recipent = email[0] username = recipent.split('@')[0] new = Reset.query.filter_by(email = recipent).first() url = 'https://cross-app-links.herokuapp.com/api/auth/setnewpassword?token=' + str(new.token) subject = "Cross-apps password reset" headerText = "Looks like you want to reset your password!" freeText = "Here we send you instructions to set up a new password for your account!" userTextBold = "Please proceed by clicking the button. \n You will be displayed a page that will allow you to set a new password." userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging." send_email(subject, 'cross-apps@yandex.com', email, render_template("email_template.html", user=username, sender="system", url=url, subject=subject, buttonText="RESET", headerText=headerText, freeText=freeText, userTextBold=userTextBold, userText=userText))
Python
0.998939
f9a1da6e60bfbd9c9e5be769f1223d628cec6481
set the module version
base_external_referentials/__openerp__.py
base_external_referentials/__openerp__.py
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2009 Akretion (<http://www.akretion.com>). All Rights Reserved # authors: Raphaël Valyi, Sharoon Thomas # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Base External Referentials', 'version': '6.1.0', 'category': 'Generic Modules/Base', 'description': """ Definition : a referential is an external system that will interacts with OpenERP Goal : store external system connection details and objects fields mapping This module provide an abstract common minimal base to add any additional external id columns to some OpenObject table, pointing to some external referential. A referential is abstract and minimal at this stage, it only has: * a name * a location (possibly webservice URL, database connection URL...); the connection method will tell it... * referential credentials (user name + password) * placeholders for custom in and out mapping for OpenERP object fields. OpenERP already has limited supported to external ids using the ir_model_data and the id fields in the loaded data such as XML or CSV. We think that's OK to store all referential ids into the same ir_model_data table: yes it makes it large, but synchronisation operations involve a network bottleneck anyway, so it's largely OK and negligible to have a large table here. The existing ir_model_data feature of OpenERP is mostly thought as an mono-external referential (even if the module key of ir_model_data plays some referential scoping role). Here we just push the concept further to assume multiple external ids for OpenERP entities and add the possibility to customize their field mapping directly in OpenERP to accomodate the external systems. """, 'author': 'Raphaël Valyi (Akretion.com), Sharoon Thomas (Openlabs.co.in)', 'website': 'http://www.akretion.com, http://openlabs.co.in/', 'depends': ['base','base_pop_up', 'base_file_protocole', 'email_template'], 'init_xml': [], 'update_xml': [ 'external_referentials_view.xml', 'report_view.xml', 'external_referentials_menu.xml', 'security/ir.model.access.csv', 'group_fields_view.xml', 'security/base_external_referentials_security.xml', 'report_mail_template.xml', ], 'demo_xml': [], 'installable': True, 'certificate': '', } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2009 Akretion (<http://www.akretion.com>). All Rights Reserved # authors: Raphaël Valyi, Sharoon Thomas # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Base External Referentials', 'version': '1.0', 'category': 'Generic Modules/Base', 'description': """ Definition : a referential is an external system that will interacts with OpenERP Goal : store external system connection details and objects fields mapping This module provide an abstract common minimal base to add any additional external id columns to some OpenObject table, pointing to some external referential. A referential is abstract and minimal at this stage, it only has: * a name * a location (possibly webservice URL, database connection URL...); the connection method will tell it... * referential credentials (user name + password) * placeholders for custom in and out mapping for OpenERP object fields. OpenERP already has limited supported to external ids using the ir_model_data and the id fields in the loaded data such as XML or CSV. We think that's OK to store all referential ids into the same ir_model_data table: yes it makes it large, but synchronisation operations involve a network bottleneck anyway, so it's largely OK and negligible to have a large table here. The existing ir_model_data feature of OpenERP is mostly thought as an mono-external referential (even if the module key of ir_model_data plays some referential scoping role). Here we just push the concept further to assume multiple external ids for OpenERP entities and add the possibility to customize their field mapping directly in OpenERP to accomodate the external systems. """, 'author': 'Raphaël Valyi (Akretion.com), Sharoon Thomas (Openlabs.co.in)', 'website': 'http://www.akretion.com, http://openlabs.co.in/', 'depends': ['base','base_pop_up', 'base_file_protocole', 'email_template'], 'init_xml': [], 'update_xml': [ 'external_referentials_view.xml', 'report_view.xml', 'external_referentials_menu.xml', 'security/ir.model.access.csv', 'group_fields_view.xml', 'security/base_external_referentials_security.xml', 'report_mail_template.xml', ], 'demo_xml': [], 'installable': True, 'certificate': '', } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
6eeb2b4f79c2f735552cf7c061b48425d3299e51
Use argparse.
validate_equajson.py
validate_equajson.py
#! /usr/bin/env python3 import json import jsonschema import sys import os import argparse def main(equajson_path, schema_path): global filepath filepath = equajson_path with open(schema_path) as schema_file: try: equajson_schema = json.load(schema_file) except: sys.stderr.write("Invalid JSON in schema: `"+schema_file.name+"'"+'\n') raise with open(equajson_path) as json_file: try: equajson = json.load(json_file) except: sys.stderr.write("Invalid JSON in file: `"+json_file.name+"'"+'\n') raise try: jsonschema.validate(equajson, equajson_schema) except jsonschema.exceptions.ValidationError: sys.stderr.write(json_file.name+'\n') raise basename_no_extension = os.path.splitext(os.path.basename(json_file.name))[0] # It's easier to make this a global variable # than to thread it through every function. filepath = None if __name__ == '__main__': parser = argparse.ArgumentParser(description='validate equajson files') parser.add_argument( '-s', '--schema', help='path to schema file', required=True ) parser.add_argument( 'json_file', help='path to json file to validate' ) args = parser.parse_args() main(args.json_file, args.schema)
#! /usr/bin/env python3 import json import jsonschema import sys import os def main(equajson_path, schema_path): global filepath filepath = equajson_path with open(schema_path) as schema_file: try: equajson_schema = json.load(schema_file) except: sys.stderr.write("Invalid JSON in schema: `"+schema_file.name+"'"+'\n') raise with open(equajson_path) as json_file: try: equajson = json.load(json_file) except: sys.stderr.write("Invalid JSON in file: `"+json_file.name+"'"+'\n') raise try: jsonschema.validate(equajson, equajson_schema) except jsonschema.exceptions.ValidationError: sys.stderr.write(json_file.name+'\n') raise basename_no_extension = os.path.splitext(os.path.basename(json_file.name))[0] # It's easier to make this a global variable # than to thread it through every function. filepath = None if __name__ == '__main__': num_args = len(sys.argv) - 1 if num_args != 2: sys.stderr.write("Usage: python "+sys.argv[0]+" equajson.json schema.json"+'\n') sys.exit(1) main(sys.argv[1], sys.argv[2])
Python
0.000001
5ecd20d86a0fe2586cbac4daadd34bb13443f94d
set central prototype executable
central/CentralProto.py
central/CentralProto.py
#!/usr/bin/python # -*- coding: utf-8 -*- import time from app.nrf24 import NRF24 from app.cipher import XTEA from app.message import MessageType # RF Communication constants NETWORK = 0xC05A SERVER_ID = 0x01 # Hardware constants CE_PIN = 25 # Timing constants PERIOD_REFRESH_KEY_SECS = 120.0 CODE = '123456' #TODO refactor all conversion methods into a common place def byte(val): return val & 0xFF def to_int(val): return (byte(val[0]) << 8) + byte(val[1]) def to_long(val): return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3]) def from_long(val): return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte (val)] def convert_key(key): key2 = [] for i in key: key2 += from_long(i) return key2 class Device: def __init__(self): self.cipher = XTEA() self.latest_ping = time.time() self.latest_voltage_level = None self.next_key_time = 0 # List of all devices and their encoding keys keys = {} # Current alarm status locked = True if __name__ == '__main__': print "Alarm System Central Prototype..." nrf = NRF24(NETWORK, SERVER_ID) print "NRF24 instance created..." nrf.begin(0, 0, CE_PIN) print "NRF24 instance started..." while True: # Wait forever for remote modules calls #FIXME we should limit the timeout in order to frequently check that all known devices # are pinging as expected... payload = nrf.recv() now = time.clock() # Have we received something? if payload: # Yes, find the originating device and port (message type) device_id = payload.device port = payload.port content = payload.content # Add the device if first time device = keys.get(device_id) if not device: device = Device() keys[device_id] = device print "Source %02X, port %02X" % (device, port) # Manage received message based on its type (port) if port == MessageType.PING_SERVER: device.latest_ping = now payload = [locked] # Check if need to generate and send new cipher key if now >= device.next_key_time: key = XTEA.generate_key() device.cipher.set_key(key) device.next_key_time = now + PERIOD_REFRESH_KEY_SECS payload = [locked] payload += convert_key(key) nrf.send(device_id, port, payload) elif port == MessageType.VOLTAGE_LEVEL: device.latest_voltage_level = to_int(content) print "Source %02X, voltage = %d mV" % (device, device.latest_voltage_level) elif port in [MessageType.LOCK_CODE, MessageType.UNLOCK_CODE]: #TODO decipher code = device.cipher.decipher([to_long(content[0:4]), to_long(content[4:8])]) code = from_long(code[0]) + from_long(code[1]) print "Source %02X, code = %s" % (device, code) #TODO convert to string and compare to CODE # Send current lock status nrf.send(device_id, port, [locked]) else: print "Source %02X, unknown port %02X!" % (device, port)
#!/usr/bin/python # -*- coding: utf-8 -*- import time from app.nrf24 import NRF24 from app.cipher import XTEA from app.message import MessageType # RF Communication constants NETWORK = 0xC05A SERVER_ID = 0x01 # Hardware constants CE_PIN = 25 # Timing constants PERIOD_REFRESH_KEY_SECS = 120.0 CODE = '123456' #TODO refactor all conversion methods into a common place def byte(val): return val & 0xFF def to_int(val): return (byte(val[0]) << 8) + byte(val[1]) def to_long(val): return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3]) def from_long(val): return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte (val)] def convert_key(key): key2 = [] for i in key: key2 += from_long(i) return key2 class Device: def __init__(self): self.cipher = XTEA() self.latest_ping = time.time() self.latest_voltage_level = None self.next_key_time = 0 # List of all devices and their encoding keys keys = {} # Current alarm status locked = True if __name__ == '__main__': print "Alarm System Central Prototype..." nrf = NRF24(NETWORK, SERVER_ID) print "NRF24 instance created..." nrf.begin(0, 0, CE_PIN) print "NRF24 instance started..." while True: # Wait forever for remote modules calls #FIXME we should limit the timeout in order to frequently check that all known devices # are pinging as expected... payload = nrf.recv() now = time.clock() # Have we received something? if payload: # Yes, find the originating device and port (message type) device_id = payload.device port = payload.port content = payload.content # Add the device if first time device = keys.get(device_id) if not device: device = Device() keys[device_id] = device print "Source %02X, port %02X" % (device, port) # Manage received message based on its type (port) if port == MessageType.PING_SERVER: device.latest_ping = now payload = [locked] # Check if need to generate and send new cipher key if now >= device.next_key_time: key = XTEA.generate_key() device.cipher.set_key(key) device.next_key_time = now + PERIOD_REFRESH_KEY_SECS payload = [locked] payload += convert_key(key) nrf.send(device_id, port, payload) elif port == MessageType.VOLTAGE_LEVEL: device.latest_voltage_level = to_int(content) print "Source %02X, voltage = %d mV" % (device, device.latest_voltage_level) elif port in [MessageType.LOCK_CODE, MessageType.UNLOCK_CODE]: #TODO decipher code = device.cipher.decipher([to_long(content[0:4]), to_long(content[4:8])]) code = from_long(code[0]) + from_long(code[1]) print "Source %02X, code = %s" % (device, code) #TODO convert to string and compare to CODE # Send current lock status nrf.send(device_id, port, [locked]) else: print "Source %02X, unknown port %02X!" % (device, port)
Python
0.000001
e6cb1617e588d6b276fe01c401f2c1b34cf88d5f
fix stuff
api/read.py
api/read.py
import datetime from django.http import JsonResponse from dateutil.parser import parse from django.contrib.auth.decorators import login_required from api.models import ( Applicant, Client, Disabilities, EmploymentEducation, Enrollment, HealthAndDV, IncomeBenefits, Services ) def get_applicants(request): applicant = {} return JsonResponse(applicant) def search_clients(request): ''' request.POST = query ''' clients = Client.objects.all() if 'query' in request.POST: q = request.POST['query'] if q.isdigit(): clients = clients.filter(uuid=q) else: clients = clients.filter(last_name__contains=q) return JsonResponse([{ "first_name": c.first_name, "middle_name": c.middle_name, "last_name": c.last_name, "social_security": c.social_security, "date_of_birth": datetime.datetime.strftime(c.date_of_birth, '%m/%d/%Y'), "ethnicity": 1, "gender": 1, "veteran": 1, "year_entered": c.year_entered, "year_exited": c.year_exited, "date_created": c.date_created } for c in clients], safe=False) def get_applicants(request): app_list = Applicant.objects.all() applicant = [{ "id": c.id, "first_name": c.first_name, "last_name": c.last_name, "why": c.why, "phone": c.phone, "email": c.emial, "address": c.address, "birthday": c.birthday, "ethnicity": value_maps.ethnicity[c.ethnicity], "gender": value_maps.gender[c.gender], "veteran": value_maps.veteran[c.veteran], "family": c.family, "domestic_violence": value_maps.domestic_violence[c.domestic_violence], "pregnancy": c.pregnancy, "drug": c.drug, "urgency": c.urgency, "created": c.created, "reviewed": c.reviewed, } for c in app_list] return JsonResponse(applicant, safe=False)
import datetime from django.http import JsonResponse from dateutil.parser import parse from django.contrib.auth.decorators import login_required from api.models import ( Applicant, Client, Disabilities, EmploymentEducation, Enrollment, HealthAndDV, IncomeBenefits, Services ) def get_applicants(request): applicant = {} return JsonResponse(applicant) def search_clients(request): ''' request.POST = query ''' clients = Client.objects.all() if 'query' in request.POST: q = request.POST['query'] if q.isdigit(): clients = clients.filter(uuid=q) else: clients = clients.filter(last_name__contains=q) return JsonResponse([{ "first_name": c.first_name, "middle_name": c.middle_name, "last_name": c.last_name, "social_security": c.social_security, "date_of_birth": datetime.datetime.strftime(c.date_of_birth, '%m/%d/%Y'), "ethnicity": 1, "gender": 1, "veteran": 1, "year_entered": c.year_entered, "year_exited": c.year_exited, "date_created": c.date_created } for c in clients], safe=False) <<<<<<< Updated upstream def get_applicants(request): app_list = Applicant.objects.all() applicant = [{ "id": c.id, "first_name": c.first_name, "last_name": c.last_name, "why": c.why, "phone": c.phone, "email": c.emial, "address": c.address, "birthday": c.birthday, "ethnicity": value_maps.ethnicity[c.ethnicity], "gender": value_maps.gender[c.gender], "veteran": value_maps.veteran[c.veteran], "family": c.family, "domestic_violence": value_maps.domestic_violence[c.domestic_violence], "pregnancy": c.pregnancy, "drug": c.drug, "urgency": c.urgency, "created": c.created, "reviewed": c.reviewed, } for c in app_list] return JsonResponse(applicant, safe=False)
Python
0.000002
ae7b583cab8d38b04ce57571f50221b4a2e429f6
Update base.py
webhook/base.py
webhook/base.py
""" Base webhook implementation """ import json from django.http import HttpResponse from django.views.generic import View from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt class WebhookBase(View): """ Simple Webhook base class to handle the most standard case. """ @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(WebhookBase, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): data = json.loads(request.body.decode('utf-8')) self.process_webhook(data) return HttpResponse(status=200) def process_webhook(self, data=None): """ Unimplemented method """ raise NotImplementedError
""" Base webhook implementation """ import json from django.http import HttpResponse from django.views.generic import View from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt class WebhookBase(View): """ Simple Webhook base class to handle the most standard case. """ @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(WebhookBase, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): data = json.loads(request.body.decode('utf-8')) self.process_webhook(data) return HttpResponse(status=200) def process_webhook(self, data): """ Unimplemented method """ raise NotImplementedError
Python
0.000001
46b860e93d8a9e8dda3499b7306e30ebcd0e0174
handle session stopped
webnotes/app.py
webnotes/app.py
import sys, os import json sys.path.insert(0, '.') sys.path.insert(0, 'app') sys.path.insert(0, 'lib') from werkzeug.wrappers import Request, Response from werkzeug.local import LocalManager from webnotes.middlewares import StaticDataMiddleware from werkzeug.exceptions import HTTPException from werkzeug.contrib.profiler import ProfilerMiddleware from webnotes import get_config import mimetypes import webnotes import webnotes.handler import webnotes.auth import webnotes.webutils local_manager = LocalManager([webnotes.local]) def handle_session_stopped(): res = Response("""<html> <body style="background-color: #EEE;"> <h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto"> Updating. We will be back in a few moments... </h3> </body> </html>""") res.status_code = 503 res.content_type = 'text/html' return res @Request.application def application(request): webnotes.local.request = request try: site = webnotes.utils.get_site_name(request.host) webnotes.init(site=site) webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \ for k, v in (request.form or request.args).iteritems() }) webnotes.local._response = Response() try: webnotes.http_request = webnotes.auth.HTTPRequest() except webnotes.AuthenticationError, e: pass if webnotes.form_dict.cmd: webnotes.handler.handle() else: webnotes.webutils.render(webnotes.request.path[1:]) except HTTPException, e: return e except webnotes.SessionStopped, e: webnotes.local._response = handle_session_stopped() finally: if webnotes.conn: webnotes.conn.close() return webnotes.local._response application = local_manager.make_middleware(application) def serve(port=8000, profile=False): webnotes.validate_versions() global application from werkzeug.serving import run_simple if profile: application = ProfilerMiddleware(application) application = StaticDataMiddleware(application, { '/': 'public', }) run_simple('0.0.0.0', int(port), application, use_reloader=True, use_debugger=True, use_evalex=True)
import sys, os import json sys.path.insert(0, '.') sys.path.insert(0, 'app') sys.path.insert(0, 'lib') from werkzeug.wrappers import Request, Response from werkzeug.local import LocalManager from webnotes.middlewares import StaticDataMiddleware from werkzeug.exceptions import HTTPException from werkzeug.contrib.profiler import ProfilerMiddleware from webnotes import get_config import mimetypes import webnotes import webnotes.handler import webnotes.auth import webnotes.webutils local_manager = LocalManager([webnotes.local]) @Request.application def application(request): webnotes.local.request = request try: site = webnotes.utils.get_site_name(request.host) webnotes.init(site=site) webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \ for k, v in (request.form or request.args).iteritems() }) webnotes.local._response = Response() try: webnotes.http_request = webnotes.auth.HTTPRequest() except webnotes.AuthenticationError, e: pass if webnotes.form_dict.cmd: webnotes.handler.handle() else: webnotes.webutils.render(webnotes.request.path[1:]) except HTTPException, e: return e finally: if webnotes.conn: webnotes.conn.close() return webnotes._response application = local_manager.make_middleware(application) def serve(port=8000, profile=False): webnotes.validate_versions() global application from werkzeug.serving import run_simple if profile: application = ProfilerMiddleware(application) application = StaticDataMiddleware(application, { '/': 'public', }) run_simple('0.0.0.0', int(port), application, use_reloader=True, use_debugger=True, use_evalex=True)
Python
0
e38fa3f55b0e60a1d6c7fa0cf194e6f3bd4b899d
add histogram util
corehq/util/datadog/gauges.py
corehq/util/datadog/gauges.py
from functools import wraps from celery.task import periodic_task from corehq.util.datadog import statsd, datadog_logger from corehq.util.soft_assert import soft_assert def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'): """ helper for easily registering datadog gauges to run periodically To update a datadog gauge on a schedule based on the result of a function just add to your app's tasks.py: my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function, run_every=crontab(minute=0)) """ _enforce_prefix(name, enforce_prefix) datadog_gauge = _DatadogGauge(name, fn, run_every) return datadog_gauge.periodic_task() def datadog_histogram(name, value, enforce_prefix='commcare', tags=None): """ Usage: Used to track the statistical distribution of a set of values over a statsd flush period. Actually submits as multiple metrics: """ _datadog_record(statsd.histogram, name, value, enforce_prefix, tags) def datadog_gauge(name, value, enforce_prefix='commcare', tags=None): _datadog_record(statsd.gauge, name, value, enforce_prefix, tags) def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None): _datadog_record(statsd.increment, name, value, enforce_prefix, tags) def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None): _enforce_prefix(name, enforce_prefix) try: fn(name, value, tags=tags) except Exception: datadog_logger.exception('Unable to record Datadog stats') class _DatadogGauge(object): def __init__(self, name, fn, run_every): self.name = name self.fn = fn self.run_every = run_every def periodic_task(self): @periodic_task('background_queue', run_every=self.run_every, acks_late=True, ignore_result=True) @wraps(self.fn) def inner(*args, **kwargs): statsd.gauge(self.name, self.fn(*args, **kwargs)) return inner def _enforce_prefix(name, prefix): soft_assert(fail_if_debug=True).call( not prefix or name.split('.')[0] == prefix, "Did you mean to call your gauge 'commcare.{}'? " "If you're sure you want to forgo the prefix, you can " "pass enforce_prefix=None".format(name))
from functools import wraps from celery.task import periodic_task from corehq.util.datadog import statsd, datadog_logger from corehq.util.soft_assert import soft_assert def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'): """ helper for easily registering datadog gauges to run periodically To update a datadog gauge on a schedule based on the result of a function just add to your app's tasks.py: my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function, run_every=crontab(minute=0)) """ _enforce_prefix(name, enforce_prefix) datadog_gauge = _DatadogGauge(name, fn, run_every) return datadog_gauge.periodic_task() def datadog_gauge(name, value, enforce_prefix='commcare', tags=None): _datadog_record(statsd.gauge, name, value, enforce_prefix, tags) def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None): _datadog_record(statsd.increment, name, value, enforce_prefix, tags) def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None): _enforce_prefix(name, enforce_prefix) try: fn(name, value, tags=tags) except Exception: datadog_logger.exception('Unable to record Datadog stats') class _DatadogGauge(object): def __init__(self, name, fn, run_every): self.name = name self.fn = fn self.run_every = run_every def periodic_task(self): @periodic_task('background_queue', run_every=self.run_every, acks_late=True, ignore_result=True) @wraps(self.fn) def inner(*args, **kwargs): statsd.gauge(self.name, self.fn(*args, **kwargs)) return inner def _enforce_prefix(name, prefix): soft_assert(fail_if_debug=True).call( not prefix or name.split('.')[0] == prefix, "Did you mean to call your gauge 'commcare.{}'? " "If you're sure you want to forgo the prefix, you can " "pass enforce_prefix=None".format(name))
Python
0.000786
3643f0ce1b7ea7982e8081ae29e726c73471cc4b
update description
vcspull/__about__.py
vcspull/__about__.py
__title__ = 'vcspull' __package_name__ = 'vcspull' __description__ = 'synchronize your repos' __version__ = '1.0.0' __author__ = 'Tony Narlock' __email__ = 'tony@git-pull.com' __license__ = 'BSD' __copyright__ = 'Copyright 2013-2016 Tony Narlock'
__title__ = 'vcspull' __package_name__ = 'vcspull' __description__ = 'vcs project manager' __version__ = '1.0.0' __author__ = 'Tony Narlock' __email__ = 'tony@git-pull.com' __license__ = 'BSD' __copyright__ = 'Copyright 2013-2016 Tony Narlock'
Python
0.000001
42561d709a2ecfee71103dfbb55116cec1128b71
fix redirect after upload
website/apps/home/views/UploadView.py
website/apps/home/views/UploadView.py
#!/bin/env python2 # -*- coding: utf-8 -*- # # This file is part of the VecNet Zika modeling interface. # For copyright and licensing information about this package, see the # NOTICE.txt and LICENSE.txt files in its top-level directory; they are # available at https://github.com/vecnet/zika # # This Source Code Form is subject to the terms of the Mozilla Public # License (MPL), version 2.0. If a copy of the MPL was not distributed # with this file, You can obtain one at http://mozilla.org/MPL/2.0/. import logging from django.core.urlresolvers import reverse from django.db import transaction from django.http.response import HttpResponseBadRequest, HttpResponseRedirect from django.views.generic.base import TemplateView from website.apps.home.utils import load_simulation_file logger = logging.getLogger(__name__) class UploadView(TemplateView): template_name = "../templates/simulation/upload.html" @transaction.atomic def post(self, request, *args, **kwargs): if request.method == 'POST': if not request.FILES['output_file']: return HttpResponseBadRequest("No 'output_file' is provided") else: sim_name = self.request.POST.get(u"name", None) is_historical = self.request.POST.get("historical") load_simulation_file(request.FILES['output_file'], simulation_name=sim_name, is_historical=is_historical) # Redirect to appropriate page whether uploading simulation or historical if is_historical!='on': return HttpResponseRedirect(reverse('home.display_simulations')) else: return HttpResponseRedirect(reverse('home.display_historical')) else: return HttpResponseRedirect("")
#!/bin/env python2 # -*- coding: utf-8 -*- # # This file is part of the VecNet Zika modeling interface. # For copyright and licensing information about this package, see the # NOTICE.txt and LICENSE.txt files in its top-level directory; they are # available at https://github.com/vecnet/zika # # This Source Code Form is subject to the terms of the Mozilla Public # License (MPL), version 2.0. If a copy of the MPL was not distributed # with this file, You can obtain one at http://mozilla.org/MPL/2.0/. import logging from django.core.urlresolvers import reverse from django.db import transaction from django.http.response import HttpResponseBadRequest, HttpResponseRedirect from django.views.generic.base import TemplateView from website.apps.home.utils import load_simulation_file logger = logging.getLogger(__name__) class UploadView(TemplateView): template_name = "../templates/simulation/upload.html" @transaction.atomic def post(self, request, *args, **kwargs): if request.method == 'POST': if not request.FILES['output_file']: return HttpResponseBadRequest("No 'output_file' is provided") else: sim_name = self.request.POST.get(u"name", None) is_historical = self.request.POST.get("historical") load_simulation_file(request.FILES['output_file'], simulation_name=sim_name, is_historical=is_historical) return HttpResponseRedirect(reverse('home.display_simulations')) else: return HttpResponseRedirect("")
Python
0
c9a915692b30458717ead2f83fce77ce295e5ed9
add recipe_folder member (#10527)
conans/pylint_plugin.py
conans/pylint_plugin.py
"""Pylint plugin for ConanFile""" import astroid from astroid import MANAGER def register(linter): """Declare package as plugin This function needs to be declared so astroid treats current file as a plugin. """ pass def transform_conanfile(node): """Transform definition of ConanFile class so dynamic fields are visible to pylint""" str_class = astroid.builtin_lookup("str") info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( "ConanInfo") build_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires") file_copier_class = MANAGER.ast_from_module_name( "conans.client.file_copier").lookup("FileCopier") file_importer_class = MANAGER.ast_from_module_name( "conans.client.importer").lookup("_FileImporter") python_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.python_requires").lookup("PyRequires") dynamic_fields = { "conan_data": str_class, "build_requires": build_requires_class, "info_build": info_class, "info": info_class, "copy": file_copier_class, "copy_deps": file_importer_class, "python_requires": [str_class, python_requires_class], "recipe_folder": str_class, } for f, t in dynamic_fields.items(): node.locals[f] = [t] MANAGER.register_transform( astroid.ClassDef, transform_conanfile, lambda node: node.qname() == "conans.model.conan_file.ConanFile") def _python_requires_member(): return astroid.parse(""" from conans.client.graph.python_requires import ConanPythonRequire python_requires = ConanPythonRequire() """) astroid.register_module_extender(astroid.MANAGER, "conans", _python_requires_member)
"""Pylint plugin for ConanFile""" import astroid from astroid import MANAGER def register(linter): """Declare package as plugin This function needs to be declared so astroid treats current file as a plugin. """ pass def transform_conanfile(node): """Transform definition of ConanFile class so dynamic fields are visible to pylint""" str_class = astroid.builtin_lookup("str") info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( "ConanInfo") build_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires") file_copier_class = MANAGER.ast_from_module_name( "conans.client.file_copier").lookup("FileCopier") file_importer_class = MANAGER.ast_from_module_name( "conans.client.importer").lookup("_FileImporter") python_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.python_requires").lookup("PyRequires") dynamic_fields = { "conan_data": str_class, "build_requires": build_requires_class, "info_build": info_class, "info": info_class, "copy": file_copier_class, "copy_deps": file_importer_class, "python_requires": [str_class, python_requires_class], } for f, t in dynamic_fields.items(): node.locals[f] = [t] MANAGER.register_transform( astroid.ClassDef, transform_conanfile, lambda node: node.qname() == "conans.model.conan_file.ConanFile") def _python_requires_member(): return astroid.parse(""" from conans.client.graph.python_requires import ConanPythonRequire python_requires = ConanPythonRequire() """) astroid.register_module_extender(astroid.MANAGER, "conans", _python_requires_member)
Python
0
4b5ae262bab0bc0c83555d39400049f20aaca9cd
Add CONVERSATION_LABEL_MAX_LENGTH constant
chatterbot/constants.py
chatterbot/constants.py
""" ChatterBot constants """ ''' The maximum length of characters that the text of a statement can contain. This should be enforced on a per-model basis by the data model for each storage adapter. ''' STATEMENT_TEXT_MAX_LENGTH = 400 ''' The maximum length of characters that the text label of a conversation can contain. The number 32 was chosen because that is the length of the string representation of a UUID4 with no hyphens. ''' CONVERSATION_LABEL_MAX_LENGTH = 32 # The maximum length of characters that the name of a tag can contain TAG_NAME_MAX_LENGTH = 50 DEFAULT_DJANGO_APP_NAME = 'django_chatterbot'
""" ChatterBot constants """ ''' The maximum length of characters that the text of a statement can contain. This should be enforced on a per-model basis by the data model for each storage adapter. ''' STATEMENT_TEXT_MAX_LENGTH = 400 # The maximum length of characters that the name of a tag can contain TAG_NAME_MAX_LENGTH = 50 DEFAULT_DJANGO_APP_NAME = 'django_chatterbot'
Python
0.999974
7a1e57fa5c6d2c6330a73e8fab95c5ef6fa0ea35
Fix indentation
tomviz/python/SetNegativeVoxelsToZero.py
tomviz/python/SetNegativeVoxelsToZero.py
def transform_scalars(dataset): """Set negative voxels to zero""" from tomviz import utils import numpy as np data = utils.get_array(dataset) data[data<0] = 0 #set negative voxels to zero # set the result as the new scalars. utils.set_array(dataset, data)
def transform_scalars(dataset): """Set negative voxels to zero""" from tomviz import utils import numpy as np data = utils.get_array(dataset) data[data<0] = 0 #set negative voxels to zero # set the result as the new scalars. utils.set_array(dataset, data)
Python
0.017244
e504ef393f9f11d243fed88b2e4acc1566ea912c
Delete unread messages
scripts/read.py
scripts/read.py
import time import cache import vkapi from log import datetime_format def main(a, args): dialogs = a.messages.getDialogs(unread=1)['items'] messages = {} users = [] chats = [] for msg in dialogs: def cb(req, resp): messages[req['peer_id']] = resp['items'][::-1] a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb) if 'chat_id' in msg['message']: chats.append(msg['message']['chat_id']) else: users.append(msg['message']['user_id']) uc = cache.UserCache(a, 'online') cc = cache.ConfCache(a) uc.load(users) cc.load(chats) a.sync() mids = [] if dialogs: print('-------------------------\n') else: print('Nothing here') for msg in dialogs: m = msg['message'] if 'chat_id' in m: print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread'])) else: print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'], ', online' if uc[m['user_id']]['online'] else '', msg['unread'])) print() for i in messages[vkapi.utils.getSender(msg['message'])]: print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body'])) print() if 'chat_id' not in m: mids.append(i['id']) print('-------------------------\n') if 't' in args: print(flush=True) mr = vkapi.MessageReceiver(a) while True: time.sleep(1) for m in mr.getMessages(): if 'chat_id' in m: print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'], uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'])) else: print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'])) print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body'])) print(flush=True) elif 'd' in args and mids: print('Deleting {} messages'.format(len(mids))) a.messages.delete(message_ids=','.join(map(str, mids)))
import time import cache import vkapi from log import datetime_format def main(a, args): dialogs = a.messages.getDialogs(unread=1)['items'] messages = {} users = [] chats = [] for msg in dialogs: def cb(req, resp): messages[req['peer_id']] = resp['items'][::-1] a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb) if 'chat_id' in msg['message']: chats.append(msg['message']['chat_id']) else: users.append(msg['message']['user_id']) uc = cache.UserCache(a, 'online') cc = cache.ConfCache(a) uc.load(users) cc.load(chats) a.sync() if dialogs: print('-------------------------\n') else: print('Nothing here') for msg in dialogs: m = msg['message'] if 'chat_id' in m: print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread'])) else: print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'], ', online' if uc[m['user_id']]['online'] else '', msg['unread'])) print() for i in messages[vkapi.utils.getSender(msg['message'])]: print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body'])) print() print('-------------------------\n') if args: print(flush=True) mr = vkapi.MessageReceiver(a) while True: time.sleep(1) for m in mr.getMessages(): if 'chat_id' in m: print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'], uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'])) else: print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'])) print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body'])) print(flush=True)
Python
0.000015
46e2997cb51e45dc58f5a97cea6642ba64d03188
Fix 9.0 version
purchase_all_shipments/__openerp__.py
purchase_all_shipments/__openerp__.py
# Author: Leonardo Pistone # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. {'name': 'Purchase All Shipments', 'version': '9.0.1.0.0', 'author': "Camptocamp,Odoo Community Association (OCA)", 'category': 'Purchases', 'license': 'AGPL-3', 'depends': ['purchase'], 'data': ['view/purchase_order.xml'], }
# Author: Leonardo Pistone # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. {'name': 'Purchase All Shipments', 'version': '8.0.1.0.0', 'author': "Camptocamp,Odoo Community Association (OCA)", 'category': 'Purchases', 'license': 'AGPL-3', 'depends': ['purchase'], 'data': ['view/purchase_order.xml'], }
Python
0
5aca45a68a229f43a25dd97d2c680716c9baabf5
add travis env to sgen
scripts/sgen.py
scripts/sgen.py
#!/usr/bin/python # Generate original static file to another with new prefix # ./sgen index.html old_prefix static_index.html new_prefix import sys from os import walk, path, environ # File lists # The two file lists should be aligned. root = environ['TRAVIS_BUILD_DIR'] files = [] for (dirpath, dirname, filenames) in walk( root + "/static"): for f in filenames: if ".html" in f: files.append(dirpath + "/" + f) # prefix of target files target_prefix = root + "/docs" target_files = [] for f in files: target_files.append(f.replace( root + "/static", target_prefix)) print(target_files) # Variables of parsing def parse_args(): if len(sys.argv) < 3: print ("Not enough arguments") exit(1) original_prefix = sys.argv[1] new_prefix = sys.argv[2] # unsafe checkout prefix if original_prefix[0] != 'h' or original_prefix[-1] != '/' or new_prefix[0] != 'h' or new_prefix[-1] != '/': print ("Seems something wrong on the prefix") exit(1) return original_prefix, new_prefix def sgen(): original_prefix, new_prefix = parse_args() # parse the publications_ref into the appropriate html format for i in range(len(files)): with open(files[i]) as f: content = f.read() new_content = content.replace(original_prefix, new_prefix) with open(target_files[i], "w+") as f: f.write(new_content) sgen()
#!/usr/bin/python # Generate original static file to another with new prefix # ./sgen index.html old_prefix static_index.html new_prefix import sys from os import walk, path # File lists # The two file lists should be aligned. files = [] for (dirpath, dirname, filenames) in walk("../static"): for f in filenames: if ".html" in f: files.append(dirpath + "/" + f) # prefix of target files target_prefix = "../docs" target_files = [] for f in files: target_files.append(f.replace("../static", target_prefix)) print(target_files) # Variables of parsing def parse_args(): if len(sys.argv) < 3: print ("Not enough arguments") exit(1) original_prefix = sys.argv[1] new_prefix = sys.argv[2] # unsafe checkout prefix if original_prefix[0] != 'h' or original_prefix[-1] != '/' or new_prefix[0] != 'h' or new_prefix[-1] != '/': print ("Seems something wrong on the prefix") exit(1) return original_prefix, new_prefix def sgen(): original_prefix, new_prefix = parse_args() # parse the publications_ref into the appropriate html format for i in range(len(files)): with open(files[i]) as f: content = f.read() new_content = content.replace(original_prefix, new_prefix) with open(target_files[i], "w+") as f: f.write(new_content) sgen()
Python
0
24cebbd351875103067162733cf682320df29cf6
Update VMfileconvert_V2.py
pyecog/light_code/VMfileconvert_V2.py
pyecog/light_code/VMfileconvert_V2.py
import glob, os, numpy, sys try: import stfio except: sys.path.append('C:\Python27\Lib\site-packages') import stfio def main(): searchpath = os.getcwd() exportdirectory = searchpath+'/ConvertedFiles/' # Make export directory if not os.path.exists(exportdirectory): os.makedirs(exportdirectory) # Walk through and find abf files pattern = '*.abf' datafilenames = glob.glob(pattern) if datafilenames: for filename in datafilenames: print ('Converting '+str(filename)) data = stfio.read(filename,ftype = "abf") x = data.aspandas() x = x.values numpy.save(exportdirectory+filename[0:-4],x) if __name__ == '__main__': main()
import glob, os, numpy import stfio def main(): searchpath = os.getcwd() exportdirectory = searchpath+'/ConvertedFiles/' # Make export directory if not os.path.exists(exportdirectory): os.makedirs(exportdirectory) # Walk through and find abf files pattern = '*.abf' datafilenames = glob.glob(pattern) if datafilenames: for filename in datafilenames: print ('Converting '+str(filename)) data = stfio.read(filename,ftype = "abf") x = data.aspandas() x = x.values numpy.save(exportdirectory+filename[0:-4],x) if __name__ == '__main__': main()
Python
0
52cbd272ec08a382b4f16dca1579a3ef72365069
use numpy mean
examples/train_multi_gpu.py
examples/train_multi_gpu.py
''' Created on Feb 6, 2017 @author: julien ''' import numpy from os.path import join import tempfile from keras.metrics import categorical_accuracy from examples.ga.dataset import get_reuters_dataset from minos.experiment.experiment import Experiment, ExperimentParameters from minos.experiment.training import Training, AccuracyDecreaseStoppingCondition,\ get_associated_validation_metric from minos.model.design import create_random_blueprint from minos.model.model import Objective, Optimizer, Metric, Layout from minos.train.trainer import ModelTrainer from minos.utils import setup_console_logging import numpy as np np.random.seed(1337) def create_experiment(input_size, output_size, batch_size): training = Training( objective=Objective('categorical_crossentropy'), optimizer=Optimizer(optimizer='Adam'), metric=Metric('categorical_accuracy'), stopping=AccuracyDecreaseStoppingCondition( metric='categorical_accuracy', min_epoch=5, max_epoch=25, noprogress_count=5), batch_size=batch_size) parameters = ExperimentParameters(use_default_values=True) layout = Layout( input_size=input_size, output_size=output_size, output_activation='softmax') experiment = Experiment( label='reuters_train_multi_gpu', layout=layout, training=training, parameters=parameters) return experiment def train_multi_gpu(max_words = 1000, batch_size=32): batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(batch_size, max_words) experiment = create_experiment(max_words, nb_classes, batch_size) blueprint = create_random_blueprint(experiment) devices = ['/gpu:0', '/gpu:1'] trainer = ModelTrainer(batch_iterator, test_batch_iterator) with tempfile.TemporaryDirectory() as tmp_dir: model, history, _duration = trainer.train( blueprint, devices, save_best_model=True, model_filename=join(tmp_dir, 'model')) metric = get_associated_validation_metric(blueprint.training.metric.metric) epoch = numpy.argmax(history.history[metric]) score = history.history[metric][epoch] print('Final training score %r after %d epoch' % (score, epoch)) test_size = 10 y_true = numpy.argmax(test_batch_iterator.y[0][:test_size]) y_pred = numpy.argmax(model.predict(test_batch_iterator.X[0][:test_size])) evaluation = numpy.mean(y_true==y_pred) print('Final evaluation score %f' % evaluation) print('Predictions (true, pred) %r' % list(zip(y_true.tolist(), y_pred.tolist()))) def main(): setup_console_logging('DEBUG') train_multi_gpu() if __name__ == '__main__': main()
''' Created on Feb 6, 2017 @author: julien ''' import numpy from os.path import join import tempfile from keras.metrics import categorical_accuracy from examples.ga.dataset import get_reuters_dataset from minos.experiment.experiment import Experiment, ExperimentParameters from minos.experiment.training import Training, AccuracyDecreaseStoppingCondition,\ get_associated_validation_metric from minos.model.design import create_random_blueprint from minos.model.model import Objective, Optimizer, Metric, Layout from minos.train.trainer import ModelTrainer from minos.utils import setup_console_logging import numpy as np np.random.seed(1337) def create_experiment(input_size, output_size, batch_size): training = Training( objective=Objective('categorical_crossentropy'), optimizer=Optimizer(optimizer='Adam'), metric=Metric('categorical_accuracy'), stopping=AccuracyDecreaseStoppingCondition( metric='categorical_accuracy', min_epoch=5, max_epoch=25, noprogress_count=5), batch_size=batch_size) parameters = ExperimentParameters(use_default_values=True) layout = Layout( input_size=input_size, output_size=output_size, output_activation='softmax') experiment = Experiment( label='reuters_train_multi_gpu', layout=layout, training=training, parameters=parameters) return experiment def train_multi_gpu(max_words = 1000, batch_size=32): batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(batch_size, max_words) experiment = create_experiment(max_words, nb_classes, batch_size) blueprint = create_random_blueprint(experiment) devices = ['/gpu:0', '/gpu:1'] trainer = ModelTrainer(batch_iterator, test_batch_iterator) with tempfile.TemporaryDirectory() as tmp_dir: model, history, _duration = trainer.train( blueprint, devices, save_best_model=True, model_filename=join(tmp_dir, 'model')) metric = get_associated_validation_metric(blueprint.training.metric.metric) epoch = numpy.argmax(history.history[metric]) score = history.history[metric][epoch] print('Final training score %r after %d epoch' % (score, epoch)) test_size = 10 y_true = test_batch_iterator.y[0][:test_size] y_pred = model.predict(test_batch_iterator.X[0][:test_size]) print('Predictions (true, pred) %r' % list(zip(y_true.tolist(), y_pred.tolist()))) evaluation = numpy.mean(numpy.argmax(y_true)==numpy.argmax(y_pred)) print('Final evaluation score %f' % evaluation) def main(): setup_console_logging('DEBUG') train_multi_gpu() if __name__ == '__main__': main()
Python
0.000225
a49ddd64758b23565870439bda36fafd5e1dac39
Put survey back otherwise
gwells/urls.py
gwells/urls.py
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.conf import settings from django.conf.urls import include, url from django.contrib import admin from . import views from gwells.views import * from gwells.views.admin import * from django.views.generic import TemplateView # Creating 2 versions of the app_root. One without and one with trailing slash # This will allow for any or no additional app_root context to be provided app_root = settings.APP_CONTEXT_ROOT if app_root: app_root_slash = app_root + '/' else: app_root_slash = app_root urlpatterns = [ # url(r'^'+ app_root +'$', views.HomeView.as_view(), name='home'), url(r'^'+ app_root_slash +'robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name='robots'), url(r'^'+ app_root_slash +'$', SearchView.well_search, name='home'), url(r'^'+ app_root_slash +'search$', SearchView.well_search, name='search'), # url(r'^(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$', views.DetailView.as_view(), name='detail'), url(r'^'+ app_root_slash +'well/(?P<pk>[0-9]+)$', WellDetailView.as_view(), name='well_detail'), url(r'^'+ app_root_slash +'registry-legacy$', RegistryView.as_view(), name='registry-legacy'), url(r'^'+ app_root_slash +'submission/(?P<pk>[0-9]+)$', ActivitySubmissionDetailView.as_view(), name='activity_submission_detail'), url(r'^'+ app_root_slash +'health$', HealthView.health, name='health'), url(r'^'+ app_root_slash +'groundwater-information', TemplateView.as_view(template_name='gwells/groundwater_information.html'), name='groundwater_information'), url(r'^'+ app_root_slash +'ajax/map_well_search/$', SearchView.map_well_search, name='map_well_search'), url(r'^'+ app_root_slash +'registries/', include('registries.urls')), ] if settings.ENABLE_DATA_ENTRY: urlpatterns = [ url(r'^'+ app_root_slash +'submission/$', ActivitySubmissionListView.as_view(), name='activity_submission_list'), url(r'^'+ app_root_slash +'submission/create$', ActivitySubmissionWizardView.as_view(views.FORMS), name='activity_submission_create'), url(r'^'+ app_root_slash +'site_admin', AdminView.as_view(), name='site_admin'), url(r'^'+ app_root_slash +'admin/survey', SurveyView.as_view(), name='survey'), url(r'^'+ app_root_slash +'admin/survey/(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$', SurveyView.as_view(), name='survey'), ] + urlpatterns if settings.DEBUG: import debug_toolbar urlpatterns = [ url(r'^__debug__/', include(debug_toolbar.urls)), url(r'^'+ app_root_slash +'admin/', include(admin.site.urls)), ] + urlpatterns
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.conf import settings from django.conf.urls import include, url from django.contrib import admin from . import views from gwells.views import * from gwells.views.admin import * from django.views.generic import TemplateView # Creating 2 versions of the app_root. One without and one with trailing slash # This will allow for any or no additional app_root context to be provided app_root = settings.APP_CONTEXT_ROOT if app_root: app_root_slash = app_root + '/' else: app_root_slash = app_root urlpatterns = [ # url(r'^'+ app_root +'$', views.HomeView.as_view(), name='home'), url(r'^'+ app_root_slash +'robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name='robots'), url(r'^'+ app_root_slash +'$', SearchView.well_search, name='home'), url(r'^'+ app_root_slash +'search$', SearchView.well_search, name='search'), # url(r'^(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$', views.DetailView.as_view(), name='detail'), url(r'^'+ app_root_slash +'well/(?P<pk>[0-9]+)$', WellDetailView.as_view(), name='well_detail'), url(r'^'+ app_root_slash +'registry-legacy$', RegistryView.as_view(), name='registry-legacy'), url(r'^'+ app_root_slash +'submission/(?P<pk>[0-9]+)$', ActivitySubmissionDetailView.as_view(), name='activity_submission_detail'), url(r'^'+ app_root_slash +'health$', HealthView.health, name='health'), url(r'^'+ app_root_slash +'groundwater-information', TemplateView.as_view(template_name='gwells/groundwater_information.html'), name='groundwater_information'), url(r'^'+ app_root_slash +'ajax/map_well_search/$', SearchView.map_well_search, name='map_well_search'), url(r'^'+ app_root_slash +'registries/', include('registries.urls')), ] if settings.ENABLE_DATA_ENTRY: urlpatterns = [ url(r'^'+ app_root_slash +'submission/$', ActivitySubmissionListView.as_view(), name='activity_submission_list'), url(r'^'+ app_root_slash +'submission/create$', ActivitySubmissionWizardView.as_view(views.FORMS), name='activity_submission_create'), url(r'^'+ app_root_slash +'site_admin', AdminView.as_view(), name='site_admin'), url(r'^'+ app_root_slash +'admin/survey/(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$', SurveyView.as_view(), name='survey'), ] + urlpatterns if settings.DEBUG: import debug_toolbar urlpatterns = [ url(r'^__debug__/', include(debug_toolbar.urls)), url(r'^'+ app_root_slash +'admin/', include(admin.site.urls)), ] + urlpatterns
Python
0
393bde7e7f3902f734e8c01f265b216f2d3eef26
remove leftover
models/dulsine_commons.py
models/dulsine_commons.py
# -*- coding: utf-8 -*- # vim: set ts=4 # Common enumerations used in some places CIVILITES = ( ('M.', 'Monsieur'), ('Mme', 'Madame'), ('Mlle', 'Mademoiselle') ) CIRCUITS = ( ('O', 'ouvert'), ('F', 'ferme'), ('N', 'pas de circuit') ) TYPES_ACTEURS = ( ('P', 'Professionnels'), ('A', 'Amateurs'), ('M', 'Mixte') ) TEAM_TYPES = ( (0, 'PAPS'), (1, 'Equipe'), (2, 'Binome'), (3, 'Equipe d\'Evacuation') ) DIPLOME_CI = 0 DIPLOME_PSE2 = 1 DIPLOME_PSE1 = 2 DIPLOME_PSC1 = 3 DIPLOME_SECOURS = ( (DIPLOME_CI, 'CI'), (DIPLOME_PSE2, 'PSE2'), (DIPLOME_PSE1, 'PSE1'), (DIPLOME_PSC1, 'PSC1'), (4, 'IPS'), (5, 'CDPE') ) DIPLOME_CONDUCTEURS = ( (10, 'CH'), (11, 'CHA'), (12, '4x4') ) DIPLOME_FORMATEURS = ( (20, 'FCI'), (21, 'PAE1'), (22, 'PAE2'), (23, 'PAE3'), (24, 'PAE4'), ) FORMATIONS = DIPLOME_SECOURS + DIPLOME_CONDUCTEURS + DIPLOME_FORMATEURS WISH_ND = 0 WISH_CHOICES = ( (WISH_ND, 'N.D.'), (1, 'Disponible'), (2, 'Intéressé'), (3, 'Très intéressé'), )
# -*- coding: utf-8 -*- # vim: set ts=4 # Common enumerations used in some places CIVILITES = ( ('M.', 'Monsieur'), ('Mme', 'Madame'), ('Mlle', 'Mademoiselle') ) CIRCUITS = ( ('O', 'ouvert'), ('F', 'ferme'), ('N', 'pas de circuit') ) TYPES_ACTEURS = ( ('P', 'Professionnels'), ('A', 'Amateurs'), ('M', 'Mixte') ) TEAM_TYPES = ( (0, 'PAPS'), (1, 'Equipe'), (2, 'Binome'), (3, 'Equipe d\'Evacuation') ) DIPLOME_SECOURS = ( (0, 'N.D.'), (1, 'CI'), (2, 'PSE2'), (3, 'PSE1'), (4, 'PSC1'), (5, 'IPS'), (6, 'CDPE') ) NOT_AVAILABLE = 0 DIPLOME_CI = 1 DIPLOME_PSE2 = 2 DIPLOME_PSE1 = 3 DIPLOME_PSC1 = 4 DIPLOME_CONDUCTEURS = ( (10, 'CH'), (11, 'CHA'), (12, '4x4') ) DIPLOME_FORMATEURS = ( (20, 'FCI'), (21, 'PAE1'), (22, 'PAE2'), (23, 'PAE3'), (24, 'PAE4'), ) FORMATIONS = DIPLOME_SECOURS + DIPLOME_CONDUCTEURS + DIPLOME_FORMATEURS WISH_ND = 0 WISH_CHOICES = ( (WISH_ND, 'N.D.'), (1, 'Disponible'), (2, 'Intéressé'), (3, 'Très intéressé'), )
Python
0.000017
f9a99102a7053e444021926d08750f04a662fd9f
remove unnecessary print statements
pyspecdata/load_files/open_subpath.py
pyspecdata/load_files/open_subpath.py
from ..core import * from ..datadir import dirformat import os.path from zipfile import ZipFile def open_subpath(file_reference,*subpath,**kwargs): """ Parameters ---------- file_reference: str or tuple If a string, then it's the name of a directory. If it's a tuple, then, it has three elements: the ZipFile object, the filename of the zip file (for reference), and the name of the file we're interested in within the zip file. test_only: bool just test if the path exists """ mode,test_only = process_kwargs([('mode','r'), ('test_only',False)],kwargs) if isinstance(file_reference,basestring): if test_only: full_path = os.path.join(file_reference, *subpath) if os.path.exists(full_path): return True else: return False else: fp = open(os.path.join(file_reference,*subpath),mode) else: if type(file_reference) == tuple: if len(file_reference) == 3 and type(file_reference[0]) is ZipFile: zf = file_reference[0] zip_basename = file_reference[1] name_inside_zip = file_reference[2] subfile = '/'.join((name_inside_zip,)+subpath) if test_only: if subfile in zf.namelist(): return True else: return False if subfile in zf.namelist(): return zf.open(subfile) else: raise ValueError(subfile+" not found in zip file") else: raise ValueError("open_subpath doesn't understand the format of the tuple passe to file_reference") else: raise ValueError("open_subpath doesn't understand the type of the file_reference") return fp
from ..core import * from ..datadir import dirformat import os.path from zipfile import ZipFile def open_subpath(file_reference,*subpath,**kwargs): """ Parameters ---------- file_reference: str or tuple If a string, then it's the name of a directory. If it's a tuple, then, it has three elements: the ZipFile object, the filename of the zip file (for reference), and the name of the file we're interested in within the zip file. test_only: bool just test if the path exists """ mode,test_only = process_kwargs([('mode','r'), ('test_only',False)],kwargs) if isinstance(file_reference,basestring): if test_only: print "testing",(file_reference,) + subpath full_path = os.path.join(file_reference, *subpath) if os.path.exists(full_path): return True else: return False else: fp = open(os.path.join(file_reference,*subpath),mode) else: if type(file_reference) == tuple: if len(file_reference) == 3 and type(file_reference[0]) is ZipFile: zf = file_reference[0] zip_basename = file_reference[1] name_inside_zip = file_reference[2] subfile = '/'.join((name_inside_zip,)+subpath) if test_only: if subfile in zf.namelist(): return True else: return False if subfile in zf.namelist(): return zf.open(subfile) else: raise ValueError(subfile+" not found in zip file") else: raise ValueError("open_subpath doesn't understand the format of the tuple passe to file_reference") else: raise ValueError("open_subpath doesn't understand the type of the file_reference") return fp
Python
0.999982
ba370231fe80280dec806c7c2515061e8607b360
Add SCA into mbio
Correlation/__init__.py
Correlation/__init__.py
__author__ = 'Wenzhi Mao' __all__ = [] def _Startup(): from mbio import _ABSpath global _path__ _path__ = _ABSpath() from os import path Clist = ['mi.c', 'omes.c'] for c in Clist: if not path.exists(_path__+'/'+c.replace('.c', '_c.so')): from mbio import _make _make(_path__+'/'+c) _Startup() from . import MI from .MI import * __all__.extend(MI.__all__) from . import OMES from .OMES import * __all__.extend(OMES.__all__) from . import SCA from .SCA import * __all__.extend(SCA.__all__)
__author__ = 'Wenzhi Mao' __all__ = [] def _Startup(): from mbio import _ABSpath global _path__ _path__ = _ABSpath() from os import path Clist = ['mi.c', 'omes.c'] for c in Clist: if not path.exists(_path__+'/'+c.replace('.c', '_c.so')): from mbio import _make _make(_path__+'/'+c) _Startup() from . import MI from .MI import * __all__.extend(MI.__all__) from . import OMES from .OMES import * __all__.extend(OMES.__all__)
Python
0.99929
2277c82efdc456e5873987eabac88810b2cece5b
Fix pep8 whitespace violation.
lms/djangoapps/courseware/features/video.py
lms/djangoapps/courseware/features/video.py
#pylint: disable=C0111 from lettuce import world, step from common import * ############### ACTIONS #################### @step('when I view it it does autoplay') def does_autoplay(step): assert(world.css_find('.video')[0]['data-autoplay'] == 'True') @step('the course has a Video component') def view_video(step): coursename = TEST_COURSE_NAME.replace(' ', '_') i_am_registered_for_the_course(step, coursename) # Make sure we have a video video = add_video_to_course(coursename) chapter_name = TEST_SECTION_NAME.replace(" ", "_") section_name = chapter_name url = django_url('/courses/edx/Test_Course/Test_Course/courseware/%s/%s' % (chapter_name, section_name)) world.browser.visit(url) def add_video_to_course(course): template_name = 'i4x://edx/templates/video/default' world.ItemFactory.create(parent_location=section_location(course), template=template_name, display_name='Video')
#pylint: disable=C0111 from lettuce import world, step from common import * ############### ACTIONS #################### @step('when I view it it does autoplay') def does_autoplay(step): assert(world.css_find('.video')[0]['data-autoplay'] == 'True') @step('the course has a Video component') def view_video(step): coursename = TEST_COURSE_NAME.replace(' ', '_') i_am_registered_for_the_course(step, coursename) # Make sure we have a video video = add_video_to_course(coursename) chapter_name = TEST_SECTION_NAME.replace(" ", "_") section_name = chapter_name url = django_url('/courses/edx/Test_Course/Test_Course/courseware/%s/%s' % (chapter_name, section_name)) world.browser.visit(url) def add_video_to_course(course): template_name = 'i4x://edx/templates/video/default' world.ItemFactory.create(parent_location=section_location(course), template=template_name, display_name='Video')
Python
0.000003
54b0feebb18816a936f4a7f323a77808f9973eb2
Update testes.py
Src/testes.py
Src/testes.py
import jogovelha import sys erroInicializar = False jogo = jogovelha.inicializar() if len(jogo) != 3: erroInicializar = True else: for linha in jogo: if len(linha) != 3: erroInicializar = True else: for elemento in linha: if elemento != "X": erroInicializar = True if erroInicializar: sys.exit(1) else: sys.exit(0)
import jogovelha import sys erroInicializar = False jogo = jogovelha.inicializar() if len(jogo) != 3: erroInicializar = True else: for linha in jogo: if len(linha) != 3: erroInicializar = True else: for elemento in linha: if elemento != ".": erroInicializar = True if erroInicializar: sys.exit(1) else: sys.exit(0)
Python
0
73ceff96b2f065517a7d67cb0b25361f5bd61388
Delete fixture after running tests
src/gramcore/filters/tests/test_edges.py
src/gramcore/filters/tests/test_edges.py
"""Tests for module gramcore.filters.edges""" import os import numpy from PIL import Image, ImageDraw from nose.tools import assert_equal from skimage import io from gramcore.filters import edges def setup(): """Create image fixture The background color is set by default to black (value == 0). .. note:: Although the rectangle should be 10x10 in reality it returns an 11x11. If the image is read with io.imread, then the colored pixels and their neighbours can be accessed with arr[9:22, 4:17]. """ img = Image.new('L', (20, 40)) draw = ImageDraw.Draw(img) draw.rectangle([(5, 10), (15, 20)], fill=255) img.save('white-square.tif') del draw def teardown(): """Delete fixture""" os.remove('white-square.tif') def test_canny(): """Apply canny to grey image and check return values .. warning:: This seems to produce some artifacts. The fixture is a black image with a white 11x11 rectangle. Thus you expect you get 44 (4*11) pixels of edges. Instead it gets 50, when sigma is 1 and 40 when sigma is 2. In both cases the shape is not correct. """ img = io.imread('white-square.tif') parameters = {'data': [img], 'sigma': 1.0} result = edges.canny(parameters) # this should be 44 check the resulting image with #result *= 255 #io.imsave('result.tif', result) assert_equal(result.sum(), 50) def test_prewitt(): """Apply prewitt to grey image and check return values .. note:: This produces correct shape though it shrinks it by 2 pixels, there are no edge pixels on the corners and each edge has a width of 2 pixels. Based on the original rectangle size, which is 11x11, and the above issues it returns 4*9*2 = 72 edge pixels. """ img = io.imread('white-square.tif') parameters = {'data': [img]} result = edges.prewitt(parameters) result = result.astype('uint8') assert_equal(result.sum(), 72) def test_sobel(): """Apply sobel to grey image and check return values .. note:: This produces correct shape though it shrinks it by 2 pixels and each edge has a width of 2 pixels. Based on the original rectangle size, which is 11x11, and the above issues it returns 4*9*2 + 4 = 76 edge pixels. """ img = io.imread('white-square.tif') parameters = {'data': [img]} result = edges.sobel(parameters) result = result.astype('uint8') assert_equal(result.sum(), 76)
"""Tests for module gramcore.filters.edges""" import os import numpy from PIL import Image, ImageDraw from nose.tools import assert_equal from skimage import io from gramcore.filters import edges def setup(): """Create image fixture The background color is set by default to black (value == 0). .. note:: Although the rectangle should be 10x10 in reality it returns an 11x11. If the image is read with io.imread, then the colored pixels and their neighbours can be accessed with arr[9:22, 4:17]. """ img = Image.new('L', (20, 40)) draw = ImageDraw.Draw(img) draw.rectangle([(5, 10), (15, 20)], fill=255) img.save('white-square.tif') del draw def teardown(): """Delete fixture""" #os.remove('white-square.tif') def test_canny(): """Apply canny to grey image and check return values .. warning:: This seems to produce some artifacts. The fixture is a black image with a white 11x11 rectangle. Thus you expect you get 44 (4*11) pixels of edges. Instead it gets 50, when sigma is 1 and 40 when sigma is 2. In both cases the shape is not correct. """ img = io.imread('white-square.tif') parameters = {'data': [img], 'sigma': 1.0} result = edges.canny(parameters) # this should be 44 check the resulting image with #result *= 255 #io.imsave('result.tif', result) assert_equal(result.sum(), 50) def test_prewitt(): """Apply prewitt to grey image and check return values .. note:: This produces correct shape though it shrinks it by 2 pixels, there are no edge pixels on the corners and each edge has a width of 2 pixels. Based on the original rectangle size, which is 11x11, and the above issues it returns 4*9*2 = 72 edge pixels. """ img = io.imread('white-square.tif') parameters = {'data': [img]} result = edges.prewitt(parameters) result = result.astype('uint8') assert_equal(result.sum(), 72) def test_sobel(): """Apply sobel to grey image and check return values .. note:: This produces correct shape though it shrinks it by 2 pixels and each edge has a width of 2 pixels. Based on the original rectangle size, which is 11x11, and the above issues it returns 4*9*2 + 4 = 76 edge pixels. """ img = io.imread('white-square.tif') parameters = {'data': [img]} result = edges.sobel(parameters) result = result.astype('uint8') assert_equal(result.sum(), 76)
Python
0
bd8caf6ab48bb1fbefdced7f33edabbdf017894a
Change of names
Demo/sockets/echosvr.py
Demo/sockets/echosvr.py
#! /usr/local/python # Python implementation of an 'echo' tcp server: echo all data it receives. # # This is the simplest possible server, sevicing a single request only. import sys from socket import * # The standard echo port isn't very useful, it requires root permissions! # ECHO_PORT = 7 ECHO_PORT = 50000 + 7 BUFSIZE = 1024 def main(): if len(sys.argv) > 1: port = int(eval(sys.argv[1])) else: port = ECHO_PORT s = socket(AF_INET, SOCK_STREAM) s.bind('', port) s.listen(0) conn, (remotehost, remoteport) = s.accept() print 'connected by', remotehost, remoteport while 1: data = conn.recv(BUFSIZE) if not data: break conn.send(data) main()
#! /usr/local/python # Python implementation of an 'echo' tcp server: echo all data it receives. # # This is the simplest possible server, sevicing a single request only. import sys from socket import * # The standard echo port isn't very useful, it requires root permissions! # ECHO_PORT = 7 ECHO_PORT = 50000 + 7 BUFSIZE = 1024 def main(): if len(sys.argv) > 1: port = int(eval(sys.argv[1])) else: port = ECHO_PORT s = socket(AF_INET, SOCK_STREAM) s.bind('', port) s.listen(0) conn, (host, remoteport) = s.accept() print 'connected by', host, remoteport while 1: data = conn.recv(BUFSIZE) if not data: break conn.send(data) main()
Python
0.001373
89502d8b8b5e81ba57a16d71c895e0192eae6182
Update for pandas 17
hetio/stats.py
hetio/stats.py
import pandas import matplotlib import matplotlib.backends.backend_pdf import seaborn def get_degrees_for_metanode(graph, metanode): """ Return a dataframe that reports the degree of each metaedge for each node of kind metanode. """ metanode_to_nodes = graph.get_metanode_to_nodes() nodes = metanode_to_nodes.get(metanode, []) rows = list() for node in nodes: for metaedge, edges in node.edges.items(): rows.append((str(node), node.name, str(metaedge), len(edges))) df = pandas.DataFrame(rows, columns=['node_id', 'node_name', 'metaedge', 'degree']) return df.sort_values(['node_name', 'metaedge']) def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4): """ Plots histograms of the degree distribution of each metaedge incident to the metanode. Each metaedge receives a facet in a seaborn.FacetGrid. """ degree_df = get_degrees_for_metanode(graph, metanode) grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height) grid.map(seaborn.distplot, 'degree', kde=False) grid.set_titles('{col_name}') return grid def plot_degrees(graph, path): """ Creates a multipage pdf with a page for each metanode showing degree distributions. """ # Temporarily disable `figure.max_open_warning` max_open = matplotlib.rcParams['figure.max_open_warning'] matplotlib.rcParams['figure.max_open_warning'] = 0 pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path) for metanode in graph.metagraph.get_nodes(): grid = plot_degrees_for_metanode(graph, metanode) grid.savefig(pdf_pages, format='pdf') pdf_pages.close() matplotlib.rcParams['figure.max_open_warning'] = max_open def get_metanode_df(graph): rows = list() for metanode, nodes in graph.get_metanode_to_nodes().items(): series = pandas.Series() series['metanode'] = metanode series['abbreviation'] = metanode.abbrev metaedges = set() for metaedge in metanode.edges: metaedges |= {metaedge, metaedge.inverse} series['metaedges'] = sum([not metaedge.inverted for metaedge in metaedges]) series['nodes'] = len(nodes) series['unconnected_nodes'] = sum(not any(node.edges.values()) for node in nodes) rows.append(series) return pandas.DataFrame(rows).sort_values('metanode') def get_metaedge_df(graph): rows = list() for metaedge, edges in graph.get_metaedge_to_edges().items(): series = pandas.Series() series['metaedge'] = str(metaedge) series['abbreviation'] = metaedge.get_abbrev() series['inverted'] = int(metaedge.inverted) series['edges'] = len(edges) series['source_nodes'] = len(set(edge.source for edge in edges)) series['target_nodes'] = len(set(edge.target for edge in edges)) rows.append(series) return pandas.DataFrame(rows).sort_values('metaedge')
import pandas import matplotlib import matplotlib.backends.backend_pdf import seaborn def get_degrees_for_metanode(graph, metanode): """ Return a dataframe that reports the degree of each metaedge for each node of kind metanode. """ metanode_to_nodes = graph.get_metanode_to_nodes() nodes = metanode_to_nodes.get(metanode, []) rows = list() for node in nodes: for metaedge, edges in node.edges.items(): rows.append((str(node), node.name, str(metaedge), len(edges))) df = pandas.DataFrame(rows, columns=['node_id', 'node_name', 'metaedge', 'degree']) return df.sort(['node_name', 'metaedge']) def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4): """ Plots histograms of the degree distribution of each metaedge incident to the metanode. Each metaedge receives a facet in a seaborn.FacetGrid. """ degree_df = get_degrees_for_metanode(graph, metanode) grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height) grid.map(seaborn.distplot, 'degree', kde=False) grid.set_titles('{col_name}') return grid def plot_degrees(graph, path): """ Creates a multipage pdf with a page for each metanode showing degree distributions. """ # Temporarily disable `figure.max_open_warning` max_open = matplotlib.rcParams['figure.max_open_warning'] matplotlib.rcParams['figure.max_open_warning'] = 0 pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path) for metanode in graph.metagraph.get_nodes(): grid = plot_degrees_for_metanode(graph, metanode) grid.savefig(pdf_pages, format='pdf') pdf_pages.close() matplotlib.rcParams['figure.max_open_warning'] = max_open def get_metanode_df(graph): rows = list() for metanode, nodes in graph.get_metanode_to_nodes().items(): series = pandas.Series() series['metanode'] = metanode series['abbreviation'] = metanode.abbrev metaedges = set() for metaedge in metanode.edges: metaedges |= {metaedge, metaedge.inverse} series['metaedges'] = sum([not metaedge.inverted for metaedge in metaedges]) series['nodes'] = len(nodes) series['unconnected_nodes'] = sum(not any(node.edges.values()) for node in nodes) rows.append(series) return pandas.DataFrame(rows).sort('metanode') def get_metaedge_df(graph): rows = list() for metaedge, edges in graph.get_metaedge_to_edges().items(): series = pandas.Series() series['metaedge'] = str(metaedge) series['abbreviation'] = metaedge.get_abbrev() series['inverted'] = int(metaedge.inverted) series['edges'] = len(edges) series['source_nodes'] = len(set(edge.source for edge in edges)) series['target_nodes'] = len(set(edge.target for edge in edges)) rows.append(series) return pandas.DataFrame(rows).sort('metaedge')
Python
0
49839733a7f26070e8d666d91fae177711154e1d
Change histogram_proto to use a custom logger.
tracing/tracing/proto/histogram_proto.py
tracing/tracing/proto/histogram_proto.py
# Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import absolute_import try: # Note: from tracing.proto import histogram_pb2 would make more sense here, # but unfortunately protoc does not generate __init__.py files if you specify # an out package (at least for the gn proto_library rule). import histogram_pb2 # pylint:disable=relative-import HAS_PROTO = True except ImportError as e: try: # crbug/1234919 # Catapult put the generated histogram_pb2.py in the same source folder, # while the others (e.g., webrtc) put it in output path. By default we # try to import from the sys.path. Here allows to try import from the # source folder as well. # TODO(wenbinzhang): Clean up import paths to work consistently. from . import histogram_pb2 # pylint:disable=relative-import HAS_PROTO = True except ImportError: HAS_PROTO = False def _EnsureProto(): """Ensures histogram_pb.py is in the PYTHONPATH. If the assert fails here, it means your script doesn't ensure histogram_pb2.py is generated and is in the PYTHONPATH. To fix this, depend on the GN rule in BUILD.gn and ensure the script gets the out/Whatever/pyproto dir in its PYTHONPATH (for instance by making your script take a --out-dir=out/Whatever flag). """ assert HAS_PROTO, ('Tried to use histogram protos, but missing ' 'histogram_pb2.py. Try cd tracing/proto && make.') def Pb2(): """Resolves the histogram proto stub. Where you would use histogram_pb2.X, instead do histogram_proto.Pb2().X. """ _EnsureProto() return histogram_pb2 if HAS_PROTO: PROTO_UNIT_MAP = { histogram_pb2.MS: 'ms', histogram_pb2.MS_BEST_FIT_FORMAT: 'msBestFitFormat', histogram_pb2.TS_MS: 'tsMs', histogram_pb2.N_PERCENT: 'n%', histogram_pb2.SIZE_IN_BYTES: 'sizeInBytes', histogram_pb2.BYTES_PER_SECOND: 'bytesPerSecond', histogram_pb2.J: 'J', histogram_pb2.W: 'W', histogram_pb2.A: 'A', histogram_pb2.V: 'V', histogram_pb2.HERTZ: 'Hz', histogram_pb2.UNITLESS: 'unitless', histogram_pb2.COUNT: 'count', histogram_pb2.SIGMA: 'sigma', } UNIT_PROTO_MAP = {v: k for k, v in PROTO_UNIT_MAP.items()} PROTO_IMPROVEMENT_DIRECTION_MAP = { histogram_pb2.BIGGER_IS_BETTER: 'biggerIsBetter', histogram_pb2.SMALLER_IS_BETTER: 'smallerIsBetter', } IMPROVEMENT_DIRECTION_PROTO_MAP = { v: k for k, v in PROTO_IMPROVEMENT_DIRECTION_MAP.items() } def UnitFromProto(proto_unit): _EnsureProto() direction = proto_unit.improvement_direction unit = PROTO_UNIT_MAP[proto_unit.unit] if direction and direction != histogram_pb2.NOT_SPECIFIED: unit += '_' + PROTO_IMPROVEMENT_DIRECTION_MAP[direction] return unit def ProtoFromUnit(unit): _EnsureProto() parts = unit.split('_') assert unit assert 0 < len(parts) <= 2, ('expected <unit>_(bigger|smaller)IsBetter' + str(parts)) proto_unit = histogram_pb2.UnitAndDirection() proto_unit.unit = UNIT_PROTO_MAP[parts[0]] if len(parts) > 1: proto_unit.improvement_direction = IMPROVEMENT_DIRECTION_PROTO_MAP[parts[1]] return proto_unit
# Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import absolute_import import logging try: # Note: from tracing.proto import histogram_pb2 would make more sense here, # but unfortunately protoc does not generate __init__.py files if you specify # an out package (at least for the gn proto_library rule). import histogram_pb2 # pylint:disable=relative-import HAS_PROTO = True except ImportError as e: try: # crbug/1234919 # Catapult put the generated histogram_pb2.py in the same source folder, # while the others (e.g., webrtc) put it in output path. By default we # try to import from the sys.path. Here allows to try import from the # source folder as well. logging.warning( 'Failed to import histogram_pb2: %s', repr(e)) from . import histogram_pb2 # pylint:disable=relative-import logging.warning( 'Retried and successfully imported histogram_pb2: %s', histogram_pb2) HAS_PROTO = True except ImportError: HAS_PROTO = False def _EnsureProto(): """Ensures histogram_pb.py is in the PYTHONPATH. If the assert fails here, it means your script doesn't ensure histogram_pb2.py is generated and is in the PYTHONPATH. To fix this, depend on the GN rule in BUILD.gn and ensure the script gets the out/Whatever/pyproto dir in its PYTHONPATH (for instance by making your script take a --out-dir=out/Whatever flag). """ assert HAS_PROTO, ('Tried to use histogram protos, but missing ' 'histogram_pb2.py. Try cd tracing/proto && make.') def Pb2(): """Resolves the histogram proto stub. Where you would use histogram_pb2.X, instead do histogram_proto.Pb2().X. """ _EnsureProto() return histogram_pb2 if HAS_PROTO: PROTO_UNIT_MAP = { histogram_pb2.MS: 'ms', histogram_pb2.MS_BEST_FIT_FORMAT: 'msBestFitFormat', histogram_pb2.TS_MS: 'tsMs', histogram_pb2.N_PERCENT: 'n%', histogram_pb2.SIZE_IN_BYTES: 'sizeInBytes', histogram_pb2.BYTES_PER_SECOND: 'bytesPerSecond', histogram_pb2.J: 'J', histogram_pb2.W: 'W', histogram_pb2.A: 'A', histogram_pb2.V: 'V', histogram_pb2.HERTZ: 'Hz', histogram_pb2.UNITLESS: 'unitless', histogram_pb2.COUNT: 'count', histogram_pb2.SIGMA: 'sigma', } UNIT_PROTO_MAP = {v: k for k, v in PROTO_UNIT_MAP.items()} PROTO_IMPROVEMENT_DIRECTION_MAP = { histogram_pb2.BIGGER_IS_BETTER: 'biggerIsBetter', histogram_pb2.SMALLER_IS_BETTER: 'smallerIsBetter', } IMPROVEMENT_DIRECTION_PROTO_MAP = { v: k for k, v in PROTO_IMPROVEMENT_DIRECTION_MAP.items() } def UnitFromProto(proto_unit): _EnsureProto() direction = proto_unit.improvement_direction unit = PROTO_UNIT_MAP[proto_unit.unit] if direction and direction != histogram_pb2.NOT_SPECIFIED: unit += '_' + PROTO_IMPROVEMENT_DIRECTION_MAP[direction] return unit def ProtoFromUnit(unit): _EnsureProto() parts = unit.split('_') assert unit assert 0 < len(parts) <= 2, ('expected <unit>_(bigger|smaller)IsBetter' + str(parts)) proto_unit = histogram_pb2.UnitAndDirection() proto_unit.unit = UNIT_PROTO_MAP[parts[0]] if len(parts) > 1: proto_unit.improvement_direction = IMPROVEMENT_DIRECTION_PROTO_MAP[parts[1]] return proto_unit
Python
0.000001
527ceabcdbded592c02ee2dce18a19ffce0248c2
Remove unnecesary comment
trunk/bdp_fe/src/bdp_fe/jobconf/views.py
trunk/bdp_fe/src/bdp_fe/jobconf/views.py
""" Module bdp_fe.jobconf.views """ import logging from django import forms from django.contrib import auth, messages from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.http import HttpResponseNotFound from django.shortcuts import get_object_or_404, redirect, render_to_response from django.template import RequestContext from pymongo import Connection import custom_model from models import Job, JobModel from views_util import safe_int_param LOGGER = logging.getLogger(__name__) @login_required def list_jobs(request): job_id = safe_int_param(request.GET, 'run_job') if job_id: run_job(request, job_id) return render_to_response('job_listing.html', { 'title': 'Job listing', 'jobs': Job.objects.all(), }, context_instance=RequestContext(request)) def retrieve_results(job_id): ans = [] ## TODO: make configurable connection = Connection('localhost', 27017) db = connection.test_database job_results = db.test_collection for job_result in job_results.find({"job_id" : job_id}): ans.append(job_result) return ans @login_required def view_results(request, job_id): job_id = int(job_id) results = retrieve_results(job_id) return render_to_response('job_results.html', { 'title' : 'Results of job %s' % job_id, 'job_results' : results }, context_instance=RequestContext(request)) def run_job(request, job_id): try: job = Job.objects.get(id=job_id) if job.status != Job.CREATED: msg = "Cannot start job in %s status" % job.get_status_display() messages.warning(request, msg) LOGGER.warning(msg) return job.status = Job.RUNNING job.save() # TODO: Unimplemented behaviour LOGGER.warning("Unimplemented job start") messages.info(request, "Job %s was started." % job_id) except Job.DoesNotExist: messages.warning(request, "Cannot start job %d: not found" % job_id) LOGGER.warning("Job %d not found" % job_id) class NewJobForm(forms.Form): name = forms.CharField(max_length=40) @login_required def new_job(request): if request.method == 'POST': form = NewJobForm(request.POST) if form.is_valid(): job = Job(name=form.cleaned_data['name'], user=request.user, status=Job.CREATED) job.save() return redirect(reverse('config_job', args=[job.id])) else: form = NewJobForm() return render_to_response('new_job.html', { 'title': 'New job', 'form': form, }, context_instance=RequestContext(request)) class UploadJarForm(forms.Form): file = forms.FileField() @login_required def config_job(request, job_id): job = get_object_or_404(Job, pk=job_id, user=request.user) if request.method == 'POST': form = UploadJarForm(request.POST, request.FILES) if form.is_valid() and custom_model.handle_upload(job, request.FILES['file']): return redirect(reverse('upload_data', args=[job.id])) else: messages.info(request, 'JAR file upload failed') else: form = UploadJarForm() return render_to_response('upload_jar.html', { 'title': 'Configure custom job', 'job_id' : job.id, 'form': form, }, context_instance=RequestContext(request)) @login_required def upload_data(request, job_id): return HttpResponseNotFound()
""" Module bdp_fe.jobconf.views """ import logging from django import forms from django.contrib import auth, messages from django.contrib.auth.decorators import login_required from django.core.urlresolvers import reverse from django.http import HttpResponseNotFound from django.shortcuts import get_object_or_404, redirect, render_to_response from django.template import RequestContext from pymongo import Connection import custom_model from models import Job, JobModel from views_util import safe_int_param LOGGER = logging.getLogger(__name__) @login_required def list_jobs(request): job_id = safe_int_param(request.GET, 'run_job') if job_id: run_job(request, job_id) return render_to_response('job_listing.html', { 'title': 'Job listing', 'jobs': Job.objects.all(), }, context_instance=RequestContext(request)) def retrieve_results(job_id): ans = [] ## TODO: make configurable connection = Connection('localhost', 27017) db = connection.test_database job_results = db.test_collection for job_result in job_results.find({"job_id" : job_id}): ans.append(job_result) return ans @login_required def view_results(request, job_id): job_id = int(job_id) ## Django URL regexp enforces this results = retrieve_results(job_id) return render_to_response('job_results.html', { 'title' : 'Results of job %s' % job_id, 'job_results' : results }, context_instance=RequestContext(request)) def run_job(request, job_id): try: job = Job.objects.get(id=job_id) if job.status != Job.CREATED: msg = "Cannot start job in %s status" % job.get_status_display() messages.warning(request, msg) LOGGER.warning(msg) return job.status = Job.RUNNING job.save() # TODO: Unimplemented behaviour LOGGER.warning("Unimplemented job start") messages.info(request, "Job %s was started." % job_id) except Job.DoesNotExist: messages.warning(request, "Cannot start job %d: not found" % job_id) LOGGER.warning("Job %d not found" % job_id) class NewJobForm(forms.Form): name = forms.CharField(max_length=40) @login_required def new_job(request): if request.method == 'POST': form = NewJobForm(request.POST) if form.is_valid(): job = Job(name=form.cleaned_data['name'], user=request.user, status=Job.CREATED) job.save() return redirect(reverse('config_job', args=[job.id])) else: form = NewJobForm() return render_to_response('new_job.html', { 'title': 'New job', 'form': form, }, context_instance=RequestContext(request)) class UploadJarForm(forms.Form): file = forms.FileField() @login_required def config_job(request, job_id): job = get_object_or_404(Job, pk=job_id, user=request.user) if request.method == 'POST': form = UploadJarForm(request.POST, request.FILES) if form.is_valid() and custom_model.handle_upload(job, request.FILES['file']): return redirect(reverse('upload_data', args=[job.id])) else: messages.info(request, 'JAR file upload failed') else: form = UploadJarForm() return render_to_response('upload_jar.html', { 'title': 'Configure custom job', 'job_id' : job.id, 'form': form, }, context_instance=RequestContext(request)) @login_required def upload_data(request, job_id): return HttpResponseNotFound()
Python
0
9371b1484e7843e479c5c54997d339d46cf4aedd
add logging
fastapp/plugins/__init__.py
fastapp/plugins/__init__.py
import os import logging logger = logging.getLogger(__name__) class Singleton(type): def __init__(cls, name, bases, dict): super(Singleton, cls).__init__(name, bases, dict) cls.instance = None def __call__(cls,*args,**kw): if cls.instance is None: logger.info("Create singleton instance for %s" % cls) cls.instance = super(Singleton, cls).__call__(*args, **kw) else: logger.info("Return singleton instance for %s" % cls) return cls.instance class PluginRegistry(object): __metaclass__ = Singleton def __init__(self): self.plugins = [] def __iter__(self): return iter(self.plugins) def add(self, cls): if cls not in self.plugins: logger.info("Register: %s" % cls) cls.init() self.plugins.append(cls) else: logger.debug("Already registered: %s" % cls) def get(self): return self.plugins def register_plugin(cls): """Class decorator for adding plugins to the registry""" PluginRegistry().add(cls()) return cls def call_plugin_func(obj, func): r_success = {} r_failed = {} registry = PluginRegistry() for plugin in registry.get(): logger.info("Handling plugin %s for %s starting" % (plugin, func)) try: plugin_func = getattr(plugin, func) r = plugin_func(obj) r_success[plugin.name] = r except Exception, e: logger.exception(e) r_failed[plugin.name] = e logger.info("Handling plugin %s for %s ended" % (plugin, func)) logger.info("Loaded %s with success, %s with errors" % (len(r_success), len(r_failed))) return r_success, r_failed class Plugin(object): __metaclass__ = Singleton def __init__(self, *args, **kwargs): self.kwargs = kwargs super(Plugin, self ).__init__() @property def name(self): return self.__class__.__name__ def attach_worker(self, **kwargs): pass def config_for_workers(self, base): # send dictionary with config to workers for the plugin # the dictionary is available in self.config(base) config = {} config.update(self.config(base)) logger.info("Config to worker for plugin %s" % self.name) return config @property def shortname(self): return self.__class__.__module__.split(".")[-1] def init(self): pass def on_create_user(self, user): pass def on_create_base(self, base): pass def on_delete_base(self, base): pass def on_start_base(self, base): pass def on_stop_base(self, base): pass def on_restart_base(self, base): pass def on_destroy_base(self, base): pass def cockpit_context(self): return {} def executor_context(self, executor): return {} def executor_context_kv(self, executor): context = self.executor_context(self, executor) new_context = [] for k, v in context.items(): new_context.append({ 'key': k, 'value': k, }) return new_context
import os import logging logger = logging.getLogger(__name__) class Singleton(type): def __init__(cls, name, bases, dict): super(Singleton, cls).__init__(name, bases, dict) cls.instance = None def __call__(cls,*args,**kw): if cls.instance is None: logger.info("Create singleton instance for %s" % cls) cls.instance = super(Singleton, cls).__call__(*args, **kw) else: logger.info("Return singleton instance for %s" % cls) return cls.instance class PluginRegistry(object): __metaclass__ = Singleton def __init__(self): self.plugins = [] def __iter__(self): return iter(self.plugins) def add(self, cls): if cls not in self.plugins: logger.info("Register: %s" % cls) cls.init() self.plugins.append(cls) else: logger.debug("Already registered: %s" % cls) def get(self): return self.plugins def register_plugin(cls): """Class decorator for adding plugins to the registry""" PluginRegistry().add(cls()) return cls def call_plugin_func(obj, func): r_success = {} r_failed = {} registry = PluginRegistry() for plugin in registry.get(): logger.info("Handling plugin %s for %s" % (plugin, func)) try: plugin_func = getattr(plugin, func) r = plugin_func(obj) r_success[plugin.name] = r except Exception, e: logger.exception(e) r_failed[plugin.name] = e return r_success, r_failed class Plugin(object): __metaclass__ = Singleton def __init__(self, *args, **kwargs): self.kwargs = kwargs super(Plugin, self ).__init__() @property def name(self): return self.__class__.__name__ def attach_worker(self, **kwargs): pass def config_for_workers(self, base): # send dictionary with config to workers for the plugin # the dictionary is available in self.config(base) config = {} config.update(self.config(base)) logger.info("Config to worker for plugin %s" % self.name) return config @property def shortname(self): return self.__class__.__module__.split(".")[-1] def init(self): pass def on_create_user(self, user): pass def on_create_base(self, base): pass def on_delete_base(self, base): pass def on_start_base(self, base): pass def on_stop_base(self, base): pass def on_restart_base(self, base): pass def on_destroy_base(self, base): pass def cockpit_context(self): return {} def executor_context(self, executor): return {} def executor_context_kv(self, executor): context = self.executor_context(self, executor) new_context = [] for k, v in context.items(): new_context.append({ 'key': k, 'value': k, }) return new_context
Python
0.000001
279bebc53c2f589db943c91f31240a38ad059d72
optimize username loginfield for mobile devices
features/gestalten/forms.py
features/gestalten/forms.py
import allauth from crispy_forms import bootstrap, layout import django from django import forms from django.contrib.auth import models as auth_models from django.contrib.sites import models as sites_models from features.groups import models as groups from utils import forms as utils_forms from features.gestalten import models def validate_slug(slug): if slug in django.conf.settings.ENTITY_SLUG_BLACKLIST: raise django.core.exceptions.ValidationError( 'Die Adresse \'%(slug)s\' ist reserviert und darf nicht verwendet werden.', params={'slug': slug}, code='reserved') if groups.Group.objects.filter(slug=slug).exists(): raise django.core.exceptions.ValidationError( 'Die Adresse \'%(slug)s\' ist bereits vergeben.', params={'slug': slug}, code='in-use') class User(utils_forms.FormMixin, forms.ModelForm): class Meta: fields = ('first_name', 'last_name', 'username') labels = {'username': 'Adresse der Benutzerseite / Pseudonym'} model = auth_models.User def clean_username(self): slug = self.cleaned_data['username'] validate_slug(slug) return slug class Gestalt(utils_forms.ExtraFormMixin, forms.ModelForm): extra_form_class = User class Meta: fields = ('about', 'public') model = models.Gestalt def get_instance(self): return self.instance.user def get_layout(self): DOMAIN = sites_models.Site.objects.get_current().domain return ( bootstrap.PrependedText( 'username', '%(domain)s/' % {'domain': DOMAIN}), 'first_name', 'last_name', layout.Field('about', rows=5), 'public', utils_forms.Submit('Profil ändern'), ) class Login(allauth.account.forms.LoginForm): password = forms.CharField(label='Kennwort', widget=forms.PasswordInput()) remember = forms.BooleanField(label='Anmeldung merken', required=False) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['login'] = forms.CharField( label='E-Mail-Adresse oder Pseudonym', widget=forms.TextInput(attrs={ 'autofocus': 'autofocus', 'autocomplete': 'username', 'autocorrect': 'off', 'autocapitalize': 'none', 'spellcheck': 'false' }))
import allauth from crispy_forms import bootstrap, layout import django from django import forms from django.contrib.auth import models as auth_models from django.contrib.sites import models as sites_models from features.groups import models as groups from utils import forms as utils_forms from features.gestalten import models def validate_slug(slug): if slug in django.conf.settings.ENTITY_SLUG_BLACKLIST: raise django.core.exceptions.ValidationError( 'Die Adresse \'%(slug)s\' ist reserviert und darf nicht verwendet werden.', params={'slug': slug}, code='reserved') if groups.Group.objects.filter(slug=slug).exists(): raise django.core.exceptions.ValidationError( 'Die Adresse \'%(slug)s\' ist bereits vergeben.', params={'slug': slug}, code='in-use') class User(utils_forms.FormMixin, forms.ModelForm): class Meta: fields = ('first_name', 'last_name', 'username') labels = {'username': 'Adresse der Benutzerseite / Pseudonym'} model = auth_models.User def clean_username(self): slug = self.cleaned_data['username'] validate_slug(slug) return slug class Gestalt(utils_forms.ExtraFormMixin, forms.ModelForm): extra_form_class = User class Meta: fields = ('about', 'public') model = models.Gestalt def get_instance(self): return self.instance.user def get_layout(self): DOMAIN = sites_models.Site.objects.get_current().domain return ( bootstrap.PrependedText( 'username', '%(domain)s/' % {'domain': DOMAIN}), 'first_name', 'last_name', layout.Field('about', rows=5), 'public', utils_forms.Submit('Profil ändern'), ) class Login(allauth.account.forms.LoginForm): password = forms.CharField(label='Kennwort', widget=forms.PasswordInput()) remember = forms.BooleanField(label='Anmeldung merken', required=False) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['login'] = forms.CharField( label='E-Mail-Adresse oder Pseudonym', widget=forms.TextInput(attrs={'autofocus': 'autofocus'}))
Python
0
64804965e031f365937ef8fe70dc749c4532053d
fix abstract scraper, can't use lxml's url parsing because we need a custom user agent
tx_highered/scripts/initial_wikipedia.py
tx_highered/scripts/initial_wikipedia.py
#! /usr/bin/env python try: from django.utils.timezone import now except ImportError: from datetime.datetime import now import requests from lxml.html import document_fromstring, tostring from tx_highered.models import Institution def get_wiki_title(name): endpoint = "http://en.wikipedia.org/w/api.php" params = dict(action="opensearch", search=name, limit=1, namespace=0, format="json",) r = requests.get(endpoint, params=params) try: _, results = r.json title = results[0] except IndexError: return None return title def get_wiki_abstract(url): r = requests.get(url, headers={'User-Agent': 'thedp-scraper/0.1alpha'}) doc = document_fromstring(r.text) root = doc try: toc = root.get_element_by_id('toc') except KeyError: return None abstract = [] for elem in toc.getparent().iterchildren(): if elem == toc: break if elem.tag == 'p': elem.make_links_absolute(url) abstract.append(tostring(elem)) return "\n".join(abstract).strip() def main(): queryset = Institution.objects.filter(institution_type='uni') qs = queryset.filter(wikipedia_title__isnull=True) for inst in qs: title = get_wiki_title(inst.name) if title: inst.wikipedia_title = title inst.save() print inst.name + " -> " + title qs = queryset.filter(wikipedia_title__isnull=False, wikipedia_scraped=None) for inst in qs: text = get_wiki_abstract(inst.wikipedia_url) if text: inst.wikipedia_abstract = text inst.wikipedia_scraped = now() inst.save() print inst if __name__ == "__main__": main()
#! /usr/bin/env python import datetime import requests from lxml.html import parse, tostring from tx_highered.models import Institution def get_wiki_title(name): endpoint = "http://en.wikipedia.org/w/api.php" params = dict(action="opensearch", search=name, limit=1, namespace=0, format="json",) r = requests.get(endpoint, params=params) try: _, results = r.json title = results[0] except IndexError: return None return title def get_wiki_abstract(url): doc = parse(url) # won't handle https root = doc.getroot() toc = root.get_element_by_id('toc') abstract = [] for elem in toc.getparent().iterchildren(): if elem == toc: break if elem.tag == 'p': elem.make_links_absolute() abstract.append(tostring(elem)) return "\n".join(abstract).strip() def main(): queryset = Institution.objects.filter(institution_type='uni') qs = queryset.filter(wikipedia_title__isnull=True) for inst in qs: title = get_wiki_title(inst.name) if title: inst.wikipedia_title = title inst.save() print inst.name + " -> " + title qs = queryset.filter(wikipedia_title__isnull=False, wikipedia_scraped=None) for inst in qs: text = get_wiki_abstract(inst.wikipedia_url) if text: inst.wikipedia_abstract = text inst.wikipedia_scraped = datetime.datetime.now() inst.save() print inst if __name__ == "__main__": main()
Python
0.000001
db3f71f537a85396d777ba28d3ad6c8156137c24
Change pg key
src/python/pagerduty.py
src/python/pagerduty.py
import json import urllib2 PD_URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" TIMEOUT = 10 def request(action, json_str): obj = json.loads(json_str) description = "%s %s is %s ( %s )" % ( obj.get('host', 'unknown host'), obj.get('service', 'unknown service'), obj.get('state', 'unknown state'), obj.get('metric', 'nil')) pg_key = obj.pop('pg_key') event = { 'service_key': pg_key, 'event_type': action, 'incident_key': "%s %s" % (obj['host'], obj['service']), 'description': description, 'details': json.dumps(obj) } try: result = json.loads( urllib2.urlopen(PD_URL, json.dumps(event), TIMEOUT).read()) print result except Exception, e: print str(e) return False return result['status'] == 'success' def trigger(json_str): return request('trigger', json_str) def acknowledge(json_str): return request('acknowledge', json_str) def resolve(json_str): return request('resolve', json_str) args = { 'pg_key': 'fixme', 'description': 'this is a test', 'host': 'foobar.com', 'service': 'whatever' } #trigger(json.dumps(args)) #resolve(json.dumps(args))
import json import urllib2 PD_URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" TIMEOUT = 10 def request(action, json_str): obj = json.loads(json_str) description = "%s %s is %s ( %s )" % ( obj.get('host', 'unknown host'), obj.get('service', 'unknown service'), obj.get('state', 'unknown state'), obj.get('metric', 'nil')) pg_key = obj.pop('pg_key') event = { 'service_key': pg_key, 'event_type': action, 'incident_key': "%s %s" % (obj['host'], obj['service']), 'description': description, 'details': json.dumps(obj) } try: result = json.loads( urllib2.urlopen(PD_URL, json.dumps(event), TIMEOUT).read()) print result except Exception, e: print str(e) return False return result['status'] == 'success' def trigger(json_str): return request('trigger', json_str) def acknowledge(json_str): return request('acknowledge', json_str) def resolve(json_str): return request('resolve', json_str) args = { 'pg_key': '113852fbf4d34663b87b7321e9eba1e1', 'description': 'this is a test', 'host': 'foobar.com', 'service': 'whatever' } #trigger(json.dumps(args)) #resolve(json.dumps(args))
Python
0.000001
8c959354b59fb25f63ca73ecdcbd0f59197cabc9
Add --random option.
scanless/cli/main.py
scanless/cli/main.py
#!/usr/bin/env python # # scanless - public port scan scrapper # https://github.com/vesche/scanless # import argparse import sys from random import choice from scanless.scanners import * SCAN_LIST = '''Scanner Name | Website ---------------|------------------------------ yougetsignal | http://www.yougetsignal.com viewdns | http://viewdns.info hackertarget | https://hackertarget.com ipfingerprints | http://www.ipfingerprints.com pingeu | http://ping.eu spiderip | https://spiderip.com portcheckers | http://www.portcheckers.com t1shopper | http://www.t1shopper.com ''' SCANNERS = { 'yougetsignal': yougetsignal, 'viewdns': viewdns, 'hackertarget': hackertarget, 'ipfingerprints': ipfingerprints, 'pingeu': pingeu, 'spiderip': spiderip, 'portcheckers': portcheckers, 't1shopper': t1shopper } def scanless(target, scanner): def run(s): try: return SCANNERS[s].scan(target) except: return 'Error, {} was unable to run.'.format(s) print('Running scanless...') if scanner == 'all': for s, _ in SCANNERS.items(): print(run(s)) elif scanner in SCANNERS: print(run(scanner)) else: print('Scanner not found, see --list to view all supported scanners.') def get_parser(): parser = argparse.ArgumentParser(description='scanless, public port scan scrapper') parser.add_argument('-t', '--target', help='ip or domain to scan', type=str) parser.add_argument('-s', '--scanner', help='scanner to use (default: yougetsignal)', type=str, default='yougetsignal') parser.add_argument('-r', '--random', help='use a random scanner', action='store_true') parser.add_argument('-l', '--list', help='list scanners', action='store_true') parser.add_argument('-a', '--all', help='use all the scanners', action='store_true') return parser def main(): parser = get_parser() args = vars(parser.parse_args()) if args['list']: print(SCAN_LIST) return if not args['target']: parser.print_help() return target = args['target'] scanner = args['scanner'].lower() if args['random']: scanner = choice([s for s, _ in SCANNERS.items()]) if args['all']: scanner = 'all' scanless(target, scanner) if __name__ == '__main__': main()
#!/usr/bin/env python # # scanless - public port scan scrapper # https://github.com/vesche/scanless # import argparse import sys from scanless.scanners import * SCAN_LIST = '''Scanner Name | Website ---------------|------------------------------ yougetsignal | http://www.yougetsignal.com viewdns | http://viewdns.info hackertarget | https://hackertarget.com ipfingerprints | http://www.ipfingerprints.com pingeu | http://ping.eu spiderip | https://spiderip.com portcheckers | http://www.portcheckers.com t1shopper | http://www.t1shopper.com ''' SCANNERS = { 'yougetsignal': yougetsignal, 'viewdns': viewdns, 'hackertarget': hackertarget, 'ipfingerprints': ipfingerprints, 'pingeu': pingeu, 'spiderip': spiderip, 'portcheckers': portcheckers, 't1shopper': t1shopper } def scanless(target, scanner): def run(s): try: return SCANNERS[s].scan(target) except: return 'Error, {} was unable to run.'.format(s) print('Running scanless...') if scanner == 'all': for s, _ in SCANNERS.items(): print(run(s)) elif scanner in SCANNERS: print(run(scanner)) else: print('Scanner not found, see --list to view all supported scanners.') def get_parser(): parser = argparse.ArgumentParser(description='scanless, public port scan scrapper') parser.add_argument('-t', '--target', help='ip or domain to scan', type=str) parser.add_argument('-s', '--scanner', help='scanner to use (default: yougetsignal)', type=str, default='yougetsignal') parser.add_argument('-l', '--list', help='list scanners', action='store_true') parser.add_argument('-a', '--all', help='use all the scanners', action='store_true') return parser def main(): parser = get_parser() args = vars(parser.parse_args()) if args['list']: print(SCAN_LIST) return if not args['target']: parser.print_help() return target = args['target'] scanner = args['scanner'].lower() if args['all']: scanner = 'all' scanless(target, scanner) if __name__ == '__main__': main()
Python
0.000001
3999e9812a766066dcccf6a4d07174144cb9f72d
Add Minecraft Wiki link to version item
wurstmineberg.45s.py
wurstmineberg.45s.py
#!/usr/local/bin/python3 import requests people = requests.get('https://api.wurstmineberg.de/v2/people.json').json() status = requests.get('https://api.wurstmineberg.de/v2/world/wurstmineberg/status.json').json() print(len(status['list'])) print('---') print('Version: {ver}|href=http://minecraft.gamepedia.com/{ver} color=gray'.format(ver=status['version'])) for wmb_id in status['list']: display_name = people['people'].get(wmb_id, {}).get('name', wmb_id) if people['people'].get(wmb_id, False) and people['people'][wmb_id].get('slack', False): slack_name = people['people'][wmb_id]['slack']['username'] slack_url = 'https://wurstmineberg.slack.com/messages/@' + slack_name else: slack_url = None print('{}|href=https://wurstmineberg.de/people/{} color=#2889be'.format(display_name, wmb_id)) if slack_url is not None: print('@{}|alternate=true href={} color=red'.format(slack_name, slack_url)) print('---') print('Start Minecraft | bash=/usr/bin/open param1=-a param2=Minecraft terminal=false')
#!/usr/local/bin/python3 import requests people = requests.get('https://api.wurstmineberg.de/v2/people.json').json() status = requests.get('https://api.wurstmineberg.de/v2/world/wurstmineberg/status.json').json() print(len(status['list'])) print('---') print('Version: {}|color=gray'.format(status['version'])) for wmb_id in status['list']: display_name = people['people'].get(wmb_id, {}).get('name', wmb_id) if people['people'].get(wmb_id, False) and people['people'][wmb_id].get('slack', False): slack_name = people['people'][wmb_id]['slack']['username'] slack_url = 'https://wurstmineberg.slack.com/messages/@' + slack_name else: slack_url = None print('{}|href=https://wurstmineberg.de/people/{} color=#2889be'.format(display_name, wmb_id)) if slack_url is not None: print('@{}|alternate=true href={} color=red'.format(slack_name, slack_url)) print('---') print('Start Minecraft | bash=/usr/bin/open param1=-a param2=Minecraft terminal=false')
Python
0
d792201bc311a15e5df48259008331b771c59aca
Fix CSS problem when Flexx is enbedded in page-app
flexx/ui/layouts/_layout.py
flexx/ui/layouts/_layout.py
""" Layout widgets """ from . import Widget class Layout(Widget): """ Abstract class for widgets that organize their child widgets. Panel widgets are layouts that do not take the natural size of their content into account, making them more efficient and suited for high-level layout. Other layouts, like HBox, are more suited for laying out content where the natural size is important. """ CSS = """ body { margin: 0; padding: 0; /*overflow: hidden;*/ } .flx-Layout { /* sizing of widgets/layouts inside layout is defined per layout */ width: 100%; height: 100%; margin: 0px; padding: 0px; border-spacing: 0px; border: 0px; } """
""" Layout widgets """ from . import Widget class Layout(Widget): """ Abstract class for widgets that organize their child widgets. Panel widgets are layouts that do not take the natural size of their content into account, making them more efficient and suited for high-level layout. Other layouts, like HBox, are more suited for laying out content where the natural size is important. """ CSS = """ body { margin: 0; padding: 0; overflow: hidden; } .flx-Layout { /* sizing of widgets/layouts inside layout is defined per layout */ width: 100%; height: 100%; margin: 0px; padding: 0px; border-spacing: 0px; border: 0px; } """
Python
0.000059
a9cfdf8fdb6853f175cdc31abc2dec91ec6dcf3a
fix import
InvenTree/part/tasks.py
InvenTree/part/tasks.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from django.utils.translation import gettext_lazy as _ import InvenTree.helpers import InvenTree.tasks import common.notifications import part.models logger = logging.getLogger("inventree") def notify_low_stock(part: part.models.Part): name = _("Low stock notification") message = _(f'The available stock for {part.name} has fallen below the configured minimum level') context = { 'part': part, 'name': name, 'message': message, 'link': InvenTree.helpers.construct_absolute_url(part.get_absolute_url()), 'template': { 'html': 'email/low_stock_notification.html', 'subject': "[InvenTree] " + name, }, } common.notifications.trigger_notifaction( part, 'part.notify_low_stock', target_fnc=part.get_subscribers, context=context, ) def notify_low_stock_if_required(part: part.models.Part): """ Check if the stock quantity has fallen below the minimum threshold of part. If true, notify the users who have subscribed to the part """ # Run "up" the tree, to allow notification for "parent" parts parts = part.get_ancestors(include_self=True, ascending=True) for p in parts: if p.is_part_low_on_stock(): InvenTree.tasks.offload_task( notify_low_stock, p )
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from django.utils.translation import gettext_lazy as _ import InvenTree.helpers import InvenTree.tasks import common.notifications import part.models from part import tasks as part_tasks logger = logging.getLogger("inventree") def notify_low_stock(part: part.models.Part): name = _("Low stock notification") message = _(f'The available stock for {part.name} has fallen below the configured minimum level') context = { 'part': part, 'name': name, 'message': message, 'link': InvenTree.helpers.construct_absolute_url(part.get_absolute_url()), 'template': { 'html': 'email/low_stock_notification.html', 'subject': "[InvenTree] " + name, }, } common.notifications.trigger_notifaction( part, 'part.notify_low_stock', target_fnc=part.get_subscribers, context=context, ) def notify_low_stock_if_required(part: part.models.Part): """ Check if the stock quantity has fallen below the minimum threshold of part. If true, notify the users who have subscribed to the part """ # Run "up" the tree, to allow notification for "parent" parts parts = part.get_ancestors(include_self=True, ascending=True) for p in parts: if p.is_part_low_on_stock(): InvenTree.tasks.offload_task( part_tasks.notify_low_stock, p )
Python
0.000001
26a21a9f5da718852c193420a0132ad822139ec0
Remove PHPBB crap
apps/devmo/context_processors.py
apps/devmo/context_processors.py
from django.conf import settings from django.utils import translation def i18n(request): return {'LANGUAGES': settings.LANGUAGES, 'LANG': settings.LANGUAGE_URL_MAP.get(translation.get_language()) or translation.get_language(), 'DIR': 'rtl' if translation.get_language_bidi() else 'ltr', } def next_url(request): if 'login' not in request.path and 'register' not in request.path: return {'next_url': request.path } return {}
from django.conf import settings from django.utils import translation def i18n(request): return {'LANGUAGES': settings.LANGUAGES, 'LANG': settings.LANGUAGE_URL_MAP.get(translation.get_language()) or translation.get_language(), 'DIR': 'rtl' if translation.get_language_bidi() else 'ltr', } def next_url(request): if 'login' not in request.path and 'register' not in request.path: return {'next_url': request.path } return {} def phpbb_logged_in(request): """Detect PHPBB login cookie.""" return { 'PHPBB_LOGGED_IN': (request.COOKIES.get( '%s_u' % settings.PHPBB_COOKIE_PREFIX, '1') != '1'), 'PHPBB_SID': request.COOKIES.get( '%s_sid' % settings.PHPBB_COOKIE_PREFIX), }
Python
0
5c9452a125bd3d2bbeb15224db0a7effa94e5330
Correct showVisible value.
apps/python/PartyLaps/ACTable.py
apps/python/PartyLaps/ACTable.py
""" A table drawing utility for Assetto Corsa. """ class ACTable(object): def __init__(self, ac, window): self.ac = ac self.window = window self.setTablePadding(0, 0) self.setCellSpacing(0, 0) self.data = {} self.cells = {} def draw(self): """ Initialise the data storage array and label array. We are required to store cell data so that the cell information can be retrieved when redrawing due to a font size change. """ self.data = {} # if self.ac is unavailable then we must be in a test and cannot # proceed. if self.ac is None: return # Delete all existing labels for label in self.cells.items(): self.ac.setVisible(label, 0) self.cells = {} for i in range(self.nColumns): for j in range(self.nRows): label = self.ac.addLabel(self.window, "") self.ac.setSize(label, self.columnWidths[i] * self.fontSize, self.fontSize) self.ac.setPosition(label, *self._cellPosition(i, j)) self.ac.setFontSize(label, self.fontSize) self.ac.setFontAlignment(label, self.columnAlignments[i]) self.cells[(i, j)] = label def setSize(self, nColumns, nRows): """ Set the size of the table in columns and rows. """ self.nColumns = nColumns self.nRows = nRows def setFontSize(self, fontSize): self.fontSize = fontSize def setTablePadding(self, paddingX, paddingY): """ Set the pixel amount of padding at the top and left of the table. """ self.paddingX = paddingX self.paddingY = paddingY def setCellSpacing(self, spacingX, spacingY): """ Set the pixel amount of spacing between each cell. """ self.spacingX = spacingX self.spacingY = spacingY def setColumnWidths(self, *columnWidths): """ Set the width of each column. The width is given in multiples of the font size. """ if len(columnWidths) != self.nColumns: raise ValueError("The number of provided column width entries does " "not match the expected number of columns.") self.columnWidths = columnWidths def setColumnAlignments(self, *columnAlignments): """ Set the alignments of each column, possible values are 'left', 'right' and 'center'. """ if len(columnAlignments) != self.nColumns: raise ValueError("The number of provided column alignment entries " "does not match the expected number of columns.") self.columnAlignments = columnAlignments def _cellPosition(self, iX, iY): """ Return the (x,y) co-ordinates for a cell at position iX,iY. """ #self.ac.log(" ".join(map(str, [type(iX), type(iY), type(self.fontSize), type(self.spacing)]))) #self.ac.log(repr(self.columnWidths)) x = self.paddingX + (sum(self.columnWidths[:iX]) * self.fontSize) + (iX * self.spacingX) y = self.paddingY + iY * (self.fontSize + self.spacingY) return (x, y) def setCellValue(self, text, iX, iY): """ Set the cell text at position iX,iY. """ self.ac.setText(self.getCellLabel(iX, iY), text) self.data[(iX, iY)] = text def setFontColor(self, r, g, b, s, iX, iY): """ Set the font color of the cell at iX,iY. """ self.ac.setFontColor(self.getCellLabel(iX, iY), r, g, b, s) def getCellLabel(self, iX, iY): try: return self.cells[(iX, iY)] except KeyError: raise ValueError("Cell not found: (%s,%s)" % (iX, iY)) def addOnClickedListener(self, iX, iY, callback): self.ac.addOnClickedListener(self.getCellLabel(iX, iY), callback)
""" A table drawing utility for Assetto Corsa. """ class ACTable(object): def __init__(self, ac, window): self.ac = ac self.window = window self.setTablePadding(0, 0) self.setCellSpacing(0, 0) self.data = {} self.cells = {} def draw(self): """ Initialise the data storage array and label array. We are required to store cell data so that the cell information can be retrieved when redrawing due to a font size change. """ self.data = {} # if self.ac is unavailable then we must be in a test and cannot # proceed. if self.ac is None: return # Delete all existing labels for label in self.cells.items(): self.ac.setVisible(label, False) self.cells = {} for i in range(self.nColumns): for j in range(self.nRows): label = self.ac.addLabel(self.window, "") self.ac.setSize(label, self.columnWidths[i] * self.fontSize, self.fontSize) self.ac.setPosition(label, *self._cellPosition(i, j)) self.ac.setFontSize(label, self.fontSize) self.ac.setFontAlignment(label, self.columnAlignments[i]) self.cells[(i, j)] = label def setSize(self, nColumns, nRows): """ Set the size of the table in columns and rows. """ self.nColumns = nColumns self.nRows = nRows def setFontSize(self, fontSize): self.fontSize = fontSize def setTablePadding(self, paddingX, paddingY): """ Set the pixel amount of padding at the top and left of the table. """ self.paddingX = paddingX self.paddingY = paddingY def setCellSpacing(self, spacingX, spacingY): """ Set the pixel amount of spacing between each cell. """ self.spacingX = spacingX self.spacingY = spacingY def setColumnWidths(self, *columnWidths): """ Set the width of each column. The width is given in multiples of the font size. """ if len(columnWidths) != self.nColumns: raise ValueError("The number of provided column width entries does " "not match the expected number of columns.") self.columnWidths = columnWidths def setColumnAlignments(self, *columnAlignments): """ Set the alignments of each column, possible values are 'left', 'right' and 'center'. """ if len(columnAlignments) != self.nColumns: raise ValueError("The number of provided column alignment entries " "does not match the expected number of columns.") self.columnAlignments = columnAlignments def _cellPosition(self, iX, iY): """ Return the (x,y) co-ordinates for a cell at position iX,iY. """ #self.ac.log(" ".join(map(str, [type(iX), type(iY), type(self.fontSize), type(self.spacing)]))) #self.ac.log(repr(self.columnWidths)) x = self.paddingX + (sum(self.columnWidths[:iX]) * self.fontSize) + (iX * self.spacingX) y = self.paddingY + iY * (self.fontSize + self.spacingY) return (x, y) def setCellValue(self, text, iX, iY): """ Set the cell text at position iX,iY. """ self.ac.setText(self.getCellLabel(iX, iY), text) self.data[(iX, iY)] = text def setFontColor(self, r, g, b, s, iX, iY): """ Set the font color of the cell at iX,iY. """ self.ac.setFontColor(self.getCellLabel(iX, iY), r, g, b, s) def getCellLabel(self, iX, iY): try: return self.cells[(iX, iY)] except KeyError: raise ValueError("Cell not found: (%s,%s)" % (iX, iY)) def addOnClickedListener(self, iX, iY, callback): self.ac.addOnClickedListener(self.getCellLabel(iX, iY), callback)
Python
0
2bf43e3ba86cc248e752175ffb82f4eab1803119
delete question module had bug previously
survey/models/question_module.py
survey/models/question_module.py
from survey.models import BaseModel from django.db import models class QuestionModule(BaseModel): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def remove_related_questions(self): self.question_templates.all().delete() def __unicode__(self): return self.name
from survey.models import BaseModel from django.db import models class QuestionModule(BaseModel): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) def remove_related_questions(self): self.question_templates.delete() def __unicode__(self): return self.name
Python
0
e21e2ff9b8258be5533261f7834438c80b0082cc
Use iter(...) instead of .iter()
framework/tasks/handlers.py
framework/tasks/handlers.py
# -*- coding: utf-8 -*- import logging import functools from flask import g from celery import group from website import settings logger = logging.getLogger(__name__) def celery_before_request(): g._celery_tasks = [] def celery_teardown_request(error=None): if error is not None: return try: tasks = g._celery_tasks if tasks: group(iter(tasks)).apply_async() except AttributeError: if not settings.DEBUG_MODE: logger.error('Task queue not initialized') def enqueue_task(signature): """If working in a request context, push task signature to ``g`` to run after request is complete; else run signature immediately. :param signature: Celery task signature """ try: if signature not in g._celery_tasks: g._celery_tasks.append(signature) except RuntimeError: signature() def queued_task(task): """Decorator that adds the wrapped task to the queue on ``g`` if Celery is enabled, else runs the task synchronously. Can only be applied to Celery tasks; should be used for all tasks fired within a request context that may write to the database to avoid race conditions. """ @functools.wraps(task) def wrapped(*args, **kwargs): if settings.USE_CELERY: signature = task.si(*args, **kwargs) enqueue_task(signature) else: task(*args, **kwargs) return wrapped handlers = { 'before_request': celery_before_request, 'teardown_request': celery_teardown_request, }
# -*- coding: utf-8 -*- import logging import functools from flask import g from celery import group from website import settings logger = logging.getLogger(__name__) def celery_before_request(): g._celery_tasks = [] def celery_teardown_request(error=None): if error is not None: return try: tasks = g._celery_tasks if tasks: group(tasks.iter()).apply_async() except AttributeError: if not settings.DEBUG_MODE: logger.error('Task queue not initialized') def enqueue_task(signature): """If working in a request context, push task signature to ``g`` to run after request is complete; else run signature immediately. :param signature: Celery task signature """ try: if signature not in g._celery_tasks: g._celery_tasks.append(signature) except RuntimeError: signature() def queued_task(task): """Decorator that adds the wrapped task to the queue on ``g`` if Celery is enabled, else runs the task synchronously. Can only be applied to Celery tasks; should be used for all tasks fired within a request context that may write to the database to avoid race conditions. """ @functools.wraps(task) def wrapped(*args, **kwargs): if settings.USE_CELERY: signature = task.si(*args, **kwargs) enqueue_task(signature) else: task(*args, **kwargs) return wrapped handlers = { 'before_request': celery_before_request, 'teardown_request': celery_teardown_request, }
Python
0.000004
9e2f9b040d0dde3237daca1c483c8b2bf0170663
Update Arch package to 2.7
archlinux/archpack_settings.py
archlinux/archpack_settings.py
# # Biicode Arch Linux package settings. # # Check PKGBUILD_template docs for those settings and # what they mean. # def settings(): return { "version": "2.7", "release_number": "1", "arch_deps": ["cmake>=3.0.2", "zlib", "glibc", "sqlite", "wget", "python2-pmw" ], "debian_deps": ["zlib1g", "libc-bin", "libsqlite3-0", "wget", "lib32z1", "python-tk" ] } if __name__ == '__main__': print(settings())
# # Biicode Arch Linux package settings. # # Check PKGBUILD_template docs for those settings and # what they mean. # def settings(): return { "version": "2.6.1", "release_number": "1", "arch_deps": ["cmake>=3.0.2", "zlib", "glibc", "sqlite", "wget", "python2-pmw" ], "debian_deps": ["zlib1g", "libc-bin", "libsqlite3-0", "wget", "lib32z1", "python-tk" ] } if __name__ == '__main__': print(settings())
Python
0
98e4452e07256aa3285906bba60e16ce4dfd1dc3
Replace do_add_subscription() in add_users_to_streams.
zerver/management/commands/add_users_to_streams.py
zerver/management/commands/add_users_to_streams.py
from __future__ import absolute_import from __future__ import print_function from optparse import make_option from typing import Any from django.core.management.base import BaseCommand from zerver.lib.actions import create_stream_if_needed, bulk_add_subscriptions from zerver.models import UserProfile, get_realm, get_user_profile_by_email class Command(BaseCommand): help = """Add some or all users in a realm to a set of streams.""" option_list = BaseCommand.option_list + ( make_option('-d', '--domain', dest='domain', type='str', help='The name of the realm in which you are adding people to streams.'), make_option('-s', '--streams', dest='streams', type='str', help='A comma-separated list of stream names.'), make_option('-u', '--users', dest='users', type='str', help='A comma-separated list of email addresses.'), make_option('-a', '--all-users', dest='all_users', action="store_true", default=False, help='Add all users in this realm to these streams.'), ) def handle(self, **options): # type: (**Any) -> None if options["domain"] is None or options["streams"] is None or \ (options["users"] is None and options["all_users"] is None): self.print_help("python manage.py", "add_users_to_streams") exit(1) stream_names = set([stream.strip() for stream in options["streams"].split(",")]) realm = get_realm(options["domain"]) if options["all_users"]: user_profiles = UserProfile.objects.filter(realm=realm) else: emails = set([email.strip() for email in options["users"].split(",")]) user_profiles = [] for email in emails: user_profiles.append(get_user_profile_by_email(email)) for stream_name in set(stream_names): for user_profile in user_profiles: stream, _ = create_stream_if_needed(user_profile.realm, stream_name) _ignore, already_subscribed = bulk_add_subscriptions([stream], [user_profile]) was_there_already = user_profile.id in {tup[0].id for tup in already_subscribed} print("%s %s to %s" % ( "Already subscribed" if was_there_already else "Subscribed", user_profile.email, stream_name))
from __future__ import absolute_import from __future__ import print_function from optparse import make_option from typing import Any from django.core.management.base import BaseCommand from zerver.lib.actions import create_stream_if_needed, do_add_subscription from zerver.models import UserProfile, get_realm, get_user_profile_by_email class Command(BaseCommand): help = """Add some or all users in a realm to a set of streams.""" option_list = BaseCommand.option_list + ( make_option('-d', '--domain', dest='domain', type='str', help='The name of the realm in which you are adding people to streams.'), make_option('-s', '--streams', dest='streams', type='str', help='A comma-separated list of stream names.'), make_option('-u', '--users', dest='users', type='str', help='A comma-separated list of email addresses.'), make_option('-a', '--all-users', dest='all_users', action="store_true", default=False, help='Add all users in this realm to these streams.'), ) def handle(self, **options): # type: (**Any) -> None if options["domain"] is None or options["streams"] is None or \ (options["users"] is None and options["all_users"] is None): self.print_help("python manage.py", "add_users_to_streams") exit(1) stream_names = set([stream.strip() for stream in options["streams"].split(",")]) realm = get_realm(options["domain"]) if options["all_users"]: user_profiles = UserProfile.objects.filter(realm=realm) else: emails = set([email.strip() for email in options["users"].split(",")]) user_profiles = [] for email in emails: user_profiles.append(get_user_profile_by_email(email)) for stream_name in set(stream_names): for user_profile in user_profiles: stream, _ = create_stream_if_needed(user_profile.realm, stream_name) did_subscribe = do_add_subscription(user_profile, stream) print("%s %s to %s" % ( "Subscribed" if did_subscribe else "Already subscribed", user_profile.email, stream_name))
Python
0
ecd33e00eb5eb8ff58358e01a6d618262e8381a6
Update upstream version of vo
astropy/io/vo/setup_package.py
astropy/io/vo/setup_package.py
from distutils.core import Extension from os.path import join from astropy import setup_helpers def get_extensions(build_type='release'): VO_DIR = 'astropy/io/vo/src' return [Extension( "astropy.io.vo.tablewriter", [join(VO_DIR, "tablewriter.c")], include_dirs=[VO_DIR])] def get_package_data(): return { 'astropy.io.vo': [ 'data/ucd1p-words.txt', 'data/*.xsd', 'data/*.dtd'], 'astropy.io.vo.tests': [ 'data/*.xml', 'data/*.gz', 'data/*.json', 'data/*.fits', 'data/*.txt'], 'astropy.io.vo.validator': [ 'urls/*.dat.gz']} def get_legacy_alias(): return setup_helpers.add_legacy_alias( 'vo', 'astropy.io.vo', '0.8')
from distutils.core import Extension from os.path import join from astropy import setup_helpers def get_extensions(build_type='release'): VO_DIR = 'astropy/io/vo/src' return [Extension( "astropy.io.vo.tablewriter", [join(VO_DIR, "tablewriter.c")], include_dirs=[VO_DIR])] def get_package_data(): return { 'astropy.io.vo': [ 'data/ucd1p-words.txt', 'data/*.xsd', 'data/*.dtd'], 'astropy.io.vo.tests': [ 'data/*.xml', 'data/*.gz', 'data/*.json', 'data/*.fits', 'data/*.txt'], 'astropy.io.vo.validator': [ 'urls/*.dat.gz']} def get_legacy_alias(): return setup_helpers.add_legacy_alias( 'vo', 'astropy.io.vo', '0.7.2')
Python
0
dc7ac28109609e2a90856dbaf01ae8bbb2fd6985
Repair the test (adding a docstring to the module type changed the docstring for an uninitialized module object).
Lib/test/test_module.py
Lib/test/test_module.py
# Test the module type from test_support import verify, vereq, verbose, TestFailed import sys module = type(sys) # An uninitialized module has no __dict__ or __name__, and __doc__ is None foo = module.__new__(module) verify(foo.__dict__ is None) try: s = foo.__name__ except AttributeError: pass else: raise TestFailed, "__name__ = %s" % repr(s) vereq(foo.__doc__, module.__doc__) # Regularly initialized module, no docstring foo = module("foo") vereq(foo.__name__, "foo") vereq(foo.__doc__, None) vereq(foo.__dict__, {"__name__": "foo", "__doc__": None}) # ASCII docstring foo = module("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc"}) # Unicode docstring foo = module("foo", u"foodoc\u1234") vereq(foo.__name__, "foo") vereq(foo.__doc__, u"foodoc\u1234") vereq(foo.__dict__, {"__name__": "foo", "__doc__": u"foodoc\u1234"}) # Reinitialization should not replace the __dict__ foo.bar = 42 d = foo.__dict__ foo.__init__("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.bar, 42) vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc", "bar": 42}) verify(foo.__dict__ is d) if verbose: print "All OK"
# Test the module type from test_support import verify, vereq, verbose, TestFailed import sys module = type(sys) # An uninitialized module has no __dict__ or __name__, and __doc__ is None foo = module.__new__(module) verify(foo.__dict__ is None) try: s = foo.__name__ except AttributeError: pass else: raise TestFailed, "__name__ = %s" % repr(s) vereq(foo.__doc__, None) # Regularly initialized module, no docstring foo = module("foo") vereq(foo.__name__, "foo") vereq(foo.__doc__, None) vereq(foo.__dict__, {"__name__": "foo", "__doc__": None}) # ASCII docstring foo = module("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc"}) # Unicode docstring foo = module("foo", u"foodoc\u1234") vereq(foo.__name__, "foo") vereq(foo.__doc__, u"foodoc\u1234") vereq(foo.__dict__, {"__name__": "foo", "__doc__": u"foodoc\u1234"}) # Reinitialization should not replace the __dict__ foo.bar = 42 d = foo.__dict__ foo.__init__("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.bar, 42) vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc", "bar": 42}) verify(foo.__dict__ is d) if verbose: print "All OK"
Python
0
5abac5e7cdc1d67ec6ed0996a5b132fae20af530
Use the URLs input in the UI boxes
compare_text_of_urls.py
compare_text_of_urls.py
#!/usr/bin/env python from __future__ import print_function import json import os from os.path import join, dirname, abspath import subprocess import sys from get_text_from_url import process_page def main(argv=None): if argv is None: argv = sys.argv arg = argv[1:] # Enter two URLs with a space between them if len(arg) > 0: # Developers can supply URL as an argument... urls = arg[0] else: # ... but normally the URL comes from the allSettings.json file with open(os.path.expanduser("~/allSettings.json")) as settings_json: settings = json.load(settings_json) url1 = settings['source-url'] url2 = settings['source-url2'] assert url1 and url2, 'Two URLs not entered.' diff_urls(url1, url2) def diff_urls(url1, url2): text1 = process_page('text_from_url1', url1) text2 = process_page('text_from_url2', url2) subprocess.check_output("./diff_text.sh", cwd=dirname(abspath(__file__))) if __name__ == '__main__': main()
#!/usr/bin/env python from __future__ import print_function import json import os from os.path import join, dirname, abspath import subprocess import sys from get_text_from_url import process_page def main(argv=None): if argv is None: argv = sys.argv arg = argv[1:] # Enter two URLs with a space between them if len(arg) > 0: # Developers can supply URL as an argument... urls = arg[0] else: # ... but normally the URL comes from the allSettings.json file with open(os.path.expanduser("~/allSettings.json")) as settings: urls = json.load(settings)['source-url'] parsed_urls = urls.strip().split(' ') assert len(parsed_urls) == 2, 'Two URLs not entered.' diff_urls(parsed_urls[0], parsed_urls[1]) def diff_urls(url1, url2): text1 = process_page('text_from_url1', url1) text2 = process_page('text_from_url2', url2) subprocess.check_output("./diff_text.sh", cwd=dirname(abspath(__file__))) if __name__ == '__main__': main()
Python
0
f81c36d4fe31815ed6692b573ad660067151d215
Drop use of 'oslo' namespace package
zaqarclient/_i18n.py
zaqarclient/_i18n.py
# Copyright 2014 Red Hat, Inc # All Rights .Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_i18n import * # noqa _translators = TranslatorFactory(domain='zaqarclient') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
# Copyright 2014 Red Hat, Inc # All Rights .Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.i18n import * # noqa _translators = TranslatorFactory(domain='zaqarclient') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
Python
0.998108
962114f65db5de4a0e58ebec93ec8f06147ae790
add RAMONVolume#data
ndio/ramon/RAMONVolume.py
ndio/ramon/RAMONVolume.py
from __future__ import absolute_import from .enums import * from .errors import * import numpy from .RAMONBase import RAMONBase class RAMONVolume(RAMONBase): """ RAMONVolume Object for storing neuroscience data with a voxel volume """ def __init__(self, xyz_offset=(0, 0, 0), resolution=0, cutout=None, voxels=None, id=DEFAULT_ID, confidence=DEFAULT_CONFIDENCE, dynamic_metadata=DEFAULT_DYNAMIC_METADATA, status=DEFAULT_STATUS, author=DEFAULT_AUTHOR): """ Initialize a new RAMONVolume object. Inherits attributes from RAMONBase as well as: Arguments: xyz_offset (int[3] : (0, 0, 0)): x,y,z coordinates of the minimum corner of the cube (if data is a cutout), otherwise empty resolution (int : 0): level in the database resolution hierarchy cutout (numpy.ndarray): dense matrix of data voxels: Unused for now """ self.xyz_offset = xyz_offset self.resolution = resolution self.cutout = cutout self.voxels = voxels RAMONBase.__init__(self, id=id, confidence=confidence, dynamic_metadata=dynamic_metadata, status=status, author=author) def data(self): """ Gets the data from the volume and pumps it into a numpy.ndarray format, regardless of whether it's stored in `cutout` or `voxels`. Returns it as though it were stored in `cutout`. This is useful for cases where you need to operate on a 3D matrix. Arguments: None Returns: numpy.ndarray """ if self.cutout: return self.cutout else: raise NotImplementedError("Cannot convert from voxel list yet.")
from __future__ import absolute_import from .enums import * from .errors import * import numpy from .RAMONBase import RAMONBase class RAMONVolume(RAMONBase): """ RAMONVolume Object for storing neuroscience data with a voxel volume """ def __init__(self, xyz_offset=(0, 0, 0), resolution=0, cutout=None, voxels=None, id=DEFAULT_ID, confidence=DEFAULT_CONFIDENCE, dynamic_metadata=DEFAULT_DYNAMIC_METADATA, status=DEFAULT_STATUS, author=DEFAULT_AUTHOR): """ Initialize a new RAMONVolume object. Inherits attributes from RAMONBase as well as: Arguments: xyz_offset (int[3] : (0, 0, 0)): x,y,z coordinates of the minimum corner of the cube (if data is a cutout), otherwise empty resolution (int : 0): level in the database resolution hierarchy cutout (numpy.ndarray): dense matrix of data voxels: Unused for now """ self.xyz_offset = xyz_offset self.resolution = resolution self.cutout = cutout self.voxels = voxels RAMONBase.__init__(self, id=id, confidence=confidence, dynamic_metadata=dynamic_metadata, status=status, author=author)
Python
0
1cba70e91b6592253a74d2c030e9c57faf0a1485
add header to backend.py
zmq/sugar/backend.py
zmq/sugar/backend.py
"""Import basic exposure of libzmq C API as a backend""" #----------------------------------------------------------------------------- # Copyright (C) 2013 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # this will be try/except when other try: from zmq.core import ( Context, Socket, IPC_PATH_MAX_LEN, Frame, Message, Stopwatch, device, proxy, strerror, zmq_errno, zmq_poll, zmq_version_info, constants, ) except ImportError: # here will be the cffi backend import, when it exists raise __all__ = [ 'Context', 'Socket', 'Frame', 'Message', 'Stopwatch', 'device', 'proxy', 'zmq_poll', 'strerror', 'zmq_errno', 'constants', 'zmq_version_info', 'IPC_PATH_MAX_LEN', ]
# this will be try/except when other try: from zmq.core import ( Context, Socket, IPC_PATH_MAX_LEN, Frame, Message, Stopwatch, device, proxy, strerror, zmq_errno, zmq_poll, zmq_version_info, constants, ) except ImportError: # here will be the cffi backend import, when it exists raise __all__ = [ 'Context', 'Socket', 'Frame', 'Message', 'Stopwatch', 'device', 'proxy', 'zmq_poll', 'strerror', 'zmq_errno', 'constants', 'zmq_version_info', 'IPC_PATH_MAX_LEN', ]
Python
0.000001
9d3d06e760cb4210405a3b720eb67c5da0478f72
remove succes_message variable
sync_settings/thread_progress.py
sync_settings/thread_progress.py
# -*- coding: utf-8 -*- #Credits to @wbond package_control import sublime, threading class ThreadProgress(): """ Animates an indicator, [= ], in the status area while a thread runs :param thread: The thread to track for activity :param message: The message to display next to the activity indicator :param success_message: The message to display once the thread is complete """ def __init__(self, thread_target, message): self.message = message self.addend = 1 self.size = 8 self.thread = threading.Thread(target=thread_target) self.thread.start() sublime.set_timeout(lambda: self.run(0), 100) def run(self, i): if not self.thread.is_alive(): return before = i % self.size after =(self.size - 1) - before sublime.status_message('Sync Settings: %s [%s=%s]' %(self.message, ' ' * before, ' ' * after)) if not after: self.addend = -1 if not before: self.addend = 1 i += self.addend sublime.set_timeout(lambda: self.run(i), 100)
# -*- coding: utf-8 -*- #Credits to @wbond package_control import sublime, threading class ThreadProgress(): """ Animates an indicator, [= ], in the status area while a thread runs :param thread: The thread to track for activity :param message: The message to display next to the activity indicator :param success_message: The message to display once the thread is complete """ def __init__(self, thread_target, message, success_message = ''): self.message = message self.success_message = success_message self.addend = 1 self.size = 8 self.thread = threading.Thread(target=thread_target) self.thread.start() sublime.set_timeout(lambda: self.run(0), 100) def run(self, i): if not self.thread.is_alive(): if self.success_message != "": self.success_message = 'Sync Settings: %s' %(self.success_message) sublime.status_message(self.success_message) return before = i % self.size after =(self.size - 1) - before sublime.status_message('Sync Settings: %s [%s=%s]' %(self.message, ' ' * before, ' ' * after)) if not after: self.addend = -1 if not before: self.addend = 1 i += self.addend sublime.set_timeout(lambda: self.run(i), 100)
Python
0.000774
c691c256682bec5f9a242ab71ab42d296bbf88a9
Add `Post`, `Tag` models to Admin
nightreads/posts/admin.py
nightreads/posts/admin.py
from django.contrib import admin from .models import Post, Tag admin.site.register(Post) admin.site.register(Tag)
from django.contrib import admin # Register your models here.
Python
0.000001
afea4f0732e68f5cbb38f5a8ac194698aec8e520
Allow any of the previous tasks to satisfy requirements.
taskflow/patterns/linear_flow.py
taskflow/patterns/linear_flow.py
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from taskflow import exceptions as exc from taskflow.patterns import ordered_flow def _convert_to_set(items): if not items: return set() if isinstance(items, set): return items if isinstance(items, dict): return items.keys() return set(iter(items)) class Flow(ordered_flow.Flow): """A linear chain of tasks that can be applied as one unit or rolled back as one unit. Each task in the chain may have requirements which are satisfied by the previous task/s in the chain.""" def __init__(self, name, tolerant=False, parents=None): super(Flow, self).__init__(name, tolerant, parents) self._tasks = [] def _fetch_task_inputs(self, task): inputs = {} for r in _convert_to_set(task.requires()): # Find the last task that provided this. for (last_task, last_results) in reversed(self.results): if r not in _convert_to_set(last_task.provides()): continue if last_results and r in last_results: inputs[r] = last_results[r] else: inputs[r] = None # Some task said they had it, get the next requirement. break return inputs def _validate_provides(self, task): # Ensure that some previous task provides this input. missing_requires = [] for r in _convert_to_set(task.requires()): found_provider = False for prev_task in reversed(self._tasks): if r in _convert_to_set(prev_task.provides()): found_provider = True break if not found_provider: missing_requires.append(r) # Ensure that the last task provides all the needed input for this # task to run correctly. if len(missing_requires): msg = ("There is no previous task providing the outputs %s" " for %s to correctly execute.") % (missing_requires, task) raise exc.InvalidStateException(msg) def add(self, task): self._validate_provides(task) self._tasks.append(task) def order(self): return list(self._tasks)
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from taskflow import exceptions as exc from taskflow.patterns import ordered_flow def _convert_to_set(items): if not items: return set() if isinstance(items, set): return items if isinstance(items, dict): return items.keys() return set(iter(items)) class Flow(ordered_flow.Flow): """A linear chain of tasks that can be applied as one unit or rolled back as one unit. Each task in the chain may have requirements which are satisfied by the previous task in the chain.""" def __init__(self, name, tolerant=False, parents=None): super(Flow, self).__init__(name, tolerant, parents) self._tasks = [] def _fetch_task_inputs(self, task): inputs = {} if self.results: (_last_task, last_results) = self.results[-1] for k in task.requires(): if last_results and k in last_results: inputs[k] = last_results[k] return inputs def _validate_provides(self, task): requires = _convert_to_set(task.requires()) last_provides = set() last_provider = None if self._tasks: last_provider = self._tasks[-1] last_provides = _convert_to_set(last_provider.provides()) # Ensure that the last task provides all the needed input for this # task to run correctly. req_diff = requires.difference(last_provides) if req_diff: if last_provider is None: msg = ("There is no previous task providing the outputs %s" " for %s to correctly execute.") % (req_diff, task) else: msg = ("%s does not provide the needed outputs %s for %s to" " correctly execute.") msg = msg % (last_provider, req_diff, task) raise exc.InvalidStateException(msg) def add(self, task): self._validate_provides(task) self._tasks.append(task) def order(self): return list(self._tasks)
Python
0.001182
e77380401d04feb1ff283add4dca9f6bad57f330
Rewrite order/tests/MemberViewTests.
haveaniceday/order/tests.py
haveaniceday/order/tests.py
from django.test import TestCase from .models import Member from website_component.models import CustomWebPage, CustomComponent # Create your tests here. class OrderViewTests(TestCase): def test_order_view(self): response = self.client.get('/order/') self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'order/home.html') class MemberViewTests(TestCase): def setUp(self): member = Member( kmID='AB123456', edocID='edoc', emailID='abc', name='捕夢網', location='A局-B分局-C所', title='代理執行秘書') member.save() another_member = Member( name='test') another_member.save() page = CustomWebPage(name='人員清單') page.save() customcomponent = CustomComponent(name='CustomComponent', value='value') customcomponent.page = page customcomponent.save() def tearDown(self): Member.objects.all().delete() def test_member_view(self): r = self.client.get('/order/member/?id=1') self.assertContains(r, 'AB123456') def test_member_all(self): r = self.client.get('/order/member/all/') self.assertContains(r, 'test') self.assertContains(r, 'abc')
from django.test import TestCase from .models import Member # Create your tests here. class OrderViewTests(TestCase): def test_order_view(self): response = self.client.get('/order/') self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'order/home.html') class MemberViewTests(TestCase): def setUp(self): member = Member( kmID='AB123456', edocID='edoc', emailID='abc', name='捕夢網', location='A局-B分局-C所', title='代理執行秘書') member.save() another_member = Member( name='test') another_member.save() def tearDown(self): Member.objects.all().delete() def test_member_view(self): r = self.client.get('/order/member/?id=1') self.assertContains(r, 'AB123456') def test_member_all(self): r = self.client.get('/order/member/all/') self.assertContains(r, 'test') self.assertContains(r, 'abc')
Python
0
718049a991470b6fa95d8db65a6482735219fc57
Fix get_acl_on
openquake/server/utils.py
openquake/server/utils.py
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2015-2017 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. import getpass import requests import logging import django from time import sleep from django.conf import settings from openquake.engine import __version__ as oqversion if settings.LOCKDOWN: django.setup() from django.contrib.auth.models import User def get_user(request): """ Returns the users from `request` if authentication is enabled, otherwise returns the default user (from settings, or as reported by the OS). """ if settings.LOCKDOWN and hasattr(request, 'user'): user = request.user.username else: user = (settings.DEFAULT_USER if hasattr(settings, 'DEFAULT_USER') else getpass.getuser()) return user def get_valid_users(request): """" Returns a list of `users` based on groups membership. Returns a list made of a single user when it is not member of any group. """ users = [] users.append(get_user(request)) if settings.LOCKDOWN and hasattr(request, 'user'): if request.user.is_authenticated(): groups = request.user.groups.values_list('name', flat=True) if groups: users = list(User.objects.filter(groups__name=groups) .values_list('username', flat=True)) return users def get_acl_on(request): """ Returns `true` if ACL should be honorated, returns otherwise `false`. """ acl_on = settings.ACL_ON if settings.LOCKDOWN and hasattr(request, 'user'): # ACL is always disabled for superusers if request.user.is_superuser: acl_on = False return acl_on def user_has_permission(request, owner): """ Returns `true` if user coming from the request has the permission to view a resource, returns `false` otherwise. """ return (True if owner in get_valid_users(request) or not get_acl_on(request) else False) def oq_server_context_processor(request): """ A custom context processor which allows injection of additional context variables. """ context = {} context['oq_engine_server_url'] = ('//' + request.META.get('HTTP_HOST', 'localhost:8800')) # this context var is also evaluated by the STANDALONE_APPS to identify # the running environment. Keep it as it is context['oq_engine_version'] = oqversion return context def check_webserver_running(url="http://localhost:8800", max_retries=30): """ Returns True if a given URL is responding within a given timeout. """ retry = 0 response = '' success = False while response != requests.codes.ok and retry < max_retries: try: response = requests.head(url, allow_redirects=True).status_code success = True except: sleep(1) retry += 1 if not success: logging.warn('Unable to connect to %s within %s retries' % (url, max_retries)) return success
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2015-2017 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. import getpass import requests import logging import django from time import sleep from django.conf import settings from openquake.engine import __version__ as oqversion if settings.LOCKDOWN: django.setup() from django.contrib.auth.models import User def get_user(request): """ Returns the users from `request` if authentication is enabled, otherwise returns the default user (from settings, or as reported by the OS). """ if settings.LOCKDOWN and hasattr(request, 'user'): user = request.user.username else: user = (settings.DEFAULT_USER if hasattr(settings, 'DEFAULT_USER') else getpass.getuser()) return user def get_valid_users(request): """" Returns a list of `users` based on groups membership. Returns a list made of a single user when it is not member of any group. """ users = [] users.append(get_user(request)) if settings.LOCKDOWN and hasattr(request, 'user'): if request.user.is_authenticated(): groups = request.user.groups.values_list('name', flat=True) if groups: users = list(User.objects.filter(groups__name=groups) .values_list('username', flat=True)) return users def get_acl_on(request): """ Returns `true` if ACL should be honorated, returns otherwise `false`. """ if settings.LOCKDOWN and hasattr(request, 'user'): if not request.user.is_superuser and settings.ACL_ON: acl_on = True else: acl_on = False return acl_on def user_has_permission(request, owner): """ Returns `true` if user coming from the request has the permission to view a resource, returns `false` otherwise. """ return (True if owner in get_valid_users(request) or not get_acl_on(request) else False) def oq_server_context_processor(request): """ A custom context processor which allows injection of additional context variables. """ context = {} context['oq_engine_server_url'] = ('//' + request.META.get('HTTP_HOST', 'localhost:8800')) # this context var is also evaluated by the STANDALONE_APPS to identify # the running environment. Keep it as it is context['oq_engine_version'] = oqversion return context def check_webserver_running(url="http://localhost:8800", max_retries=30): """ Returns True if a given URL is responding within a given timeout. """ retry = 0 response = '' success = False while response != requests.codes.ok and retry < max_retries: try: response = requests.head(url, allow_redirects=True).status_code success = True except: sleep(1) retry += 1 if not success: logging.warn('Unable to connect to %s within %s retries' % (url, max_retries)) return success
Python
0.000001
4f2b4b07131c462873b87b869e8df1de41af5848
Add some test code
RNACompete/SeqStruct.py
RNACompete/SeqStruct.py
# Copyright 2000-2002 Brad Chapman. # Copyright 2004-2005 by M de Hoon. # Copyright 2007-2015 by Peter Cock. # All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # Modified Copyright 2016 by Kevin Ha """This class inherits Bio.Seq that adds functionality for handling RNAContextualSequenceSecondaryStructure alphabets. Specifically, will take a RNA sequence and contextual secondary structure sequence and convert it to a unified RNAContextualSequenceSecondaryStructure alphabet. """ from secondarystructure import RNAContextualSequenceSecondaryStructure as RNASS from Bio.Seq import Seq class SeqStruct(Seq): """A read-only Sequence object that extends Bio.Seq Adds extra function for converting RNA sequence and contextual secondary structure sequence into a RNAContextualSequenceSecondaryStructure sequence """ def __init__(self, seq, struct): # Convert sequence and struct sequences newseq = SeqStruct.convert(seq, struct) super(SeqStruct, self).__init__(newseq, RNASS) @staticmethod def convert(seq, struct): """Convert a seq and struct SeqRecord to a new SeqRecord with alphabet RNAContextualSequenceSecondaryStructure """ if len(seq) != len(struct): raise ValueError(('Sequence and structure records have' ' different lengths')) seqstruct_sequence = '' for i,j in zip(seq, struct): seqstruct_sequence += RNASS.convert(i, j) return seqstruct_sequence if __name__ == "__main__": s = SeqStruct('AGC', 'BBB') print s
# Copyright 2000-2002 Brad Chapman. # Copyright 2004-2005 by M de Hoon. # Copyright 2007-2015 by Peter Cock. # All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # Modified Copyright 2016 by Kevin Ha """This class inherits Bio.Seq that adds functionality for handling RNAContextualSequenceSecondaryStructure alphabets. Specifically, will take a RNA sequence and contextual secondary structure sequence and convert it to a unified RNAContextualSequenceSecondaryStructure alphabet. """ from secondarystructure import RNAContextualSequenceSecondaryStructure as RNASS from Bio.Seq import Seq class SeqStruct(Seq): """A read-only Sequence object that extends Bio.Seq Adds extra function for converting RNA sequence and contextual secondary structure sequence into a RNAContextualSequenceSecondaryStructure sequence """ def __init__(self, seq, struct): # Convert sequence and struct sequences newseq = SeqStruct.convert(seq, struct) super(SeqStruct, self).__init__(newseq, RNASS) @staticmethod def convert(seq, struct): """Convert a seq and struct SeqRecord to a new SeqRecord with alphabet RNAContextualSequenceSecondaryStructure """ if len(seq) != len(struct): raise ValueError(('Sequence and structure records have' ' different lengths')) seqstruct_sequence = '' for i,j in zip(seq, struct): seqstruct_sequence += RNASS.convert(i, j) return seqstruct_sequence
Python
0.000037
951c0dbcfeb016dbde6e1a7a3f0eacc506c9211e
Rename sockjs router prefix to /ws/api/
ws.py
ws.py
import json from tornado import web, ioloop from sockjs.tornado import SockJSRouter, SockJSConnection from blimp.utils.websockets import WebSocketsRequest class RESTAPIConnection(SockJSConnection): def on_open(self, info): self.send_json({'connected': True}) def on_message(self, data): response = WebSocketsRequest(data).get_response() self.send_json(response) def send_json(self, obj): self.send(json.dumps(obj)) if __name__ == '__main__': import logging import argparse parser = argparse.ArgumentParser() parser.add_argument( '--port', help='Optional port number. Defaults to 8080', default=8080, ) parser.add_argument( '--debug', help='Verbosity level set to DEBUG. Defaults to WARNING.', action='store_const', dest='loglevel', const=logging.DEBUG, default=logging.WARNING ) parser.add_argument( '--verbose', help='Verbosity level set to INFO.', action='store_const', dest='loglevel', const=logging.INFO ) args = parser.parse_args() port = args.port logging.getLogger().setLevel(args.loglevel) EchoRouter = SockJSRouter(RESTAPIConnection, '/ws/api/') app = web.Application(EchoRouter.urls) app.listen(port) logging.info(" [*] Listening on 0.0.0.0:{}".format(port)) ioloop.IOLoop.instance().start()
import json from tornado import web, ioloop from sockjs.tornado import SockJSRouter, SockJSConnection from blimp.utils.websockets import WebSocketsRequest class EchoConnection(SockJSConnection): def on_open(self, info): self.send_json({'connected': True}) def on_message(self, data): response = WebSocketsRequest(data).get_response() self.send_json(response) def send_json(self, obj): self.send(json.dumps(obj)) if __name__ == '__main__': import logging import argparse parser = argparse.ArgumentParser() parser.add_argument('--port', help='Optional port number. Defaults to 8080', default=8080, ) parser.add_argument('--debug', help='Verbosity level set to DEBUG. Defaults to WARNING.', action='store_const', dest='loglevel', const=logging.DEBUG, default=logging.WARNING ) parser.add_argument('--verbose', help='Verbosity level set to INFO.', action='store_const', dest='loglevel', const=logging.INFO ) args = parser.parse_args() port = args.port logging.getLogger().setLevel(args.loglevel) EchoRouter = SockJSRouter(EchoConnection, '/echo') app = web.Application(EchoRouter.urls) app.listen(port) logging.info(" [*] Listening on 0.0.0.0:{}".format(port)) ioloop.IOLoop.instance().start()
Python
0.000002
4fe8a1c1b294f0d75a901d4e8e80f47f5583e44e
Fix for test failure introduced by basic auth changes
pages/lms/info.py
pages/lms/info.py
from e2e_framework.page_object import PageObject from ..lms import BASE_URL class InfoPage(PageObject): """ Info pages for the main site. These are basically static pages, so we use one page object to represent them all. """ # Dictionary mapping section names to URL paths SECTION_PATH = { 'about': '/about', 'faq': '/faq', 'press': '/press', 'contact': '/contact', 'terms': '/tos', 'privacy': '/privacy', 'honor': '/honor', } # Dictionary mapping URLs to expected css selector EXPECTED_CSS = { '/about': 'section.vision', '/faq': 'section.faq', '/press': 'section.press', '/contact': 'section.contact', '/tos': 'section.tos', '/privacy': 'section.privacy-policy', '/honor': 'section.honor-code', } @property def name(self): return "lms.info" @property def requirejs(self): return [] @property def js_globals(self): return [] def url(self, section=None): return BASE_URL + self.SECTION_PATH[section] def is_browser_on_page(self): # Find the appropriate css based on the URL for url_path, css_sel in self.EXPECTED_CSS.iteritems(): if self.browser.url.endswith(url_path): return self.is_css_present(css_sel) # Could not find the CSS based on the URL return False @classmethod def sections(cls): return cls.SECTION_PATH.keys()
from e2e_framework.page_object import PageObject from ..lms import BASE_URL class InfoPage(PageObject): """ Info pages for the main site. These are basically static pages, so we use one page object to represent them all. """ # Dictionary mapping section names to URL paths SECTION_PATH = { 'about': '/about', 'faq': '/faq', 'press': '/press', 'contact': '/contact', 'terms': '/tos', 'privacy': '/privacy', 'honor': '/honor', } # Dictionary mapping URLs to expected css selector EXPECTED_CSS = { '/about': 'section.vision', '/faq': 'section.faq', '/press': 'section.press', '/contact': 'section.contact', '/tos': 'section.tos', '/privacy': 'section.privacy-policy', '/honor': 'section.honor-code', } @property def name(self): return "lms.info" @property def requirejs(self): return [] @property def js_globals(self): return [] def url(self, section=None): return BASE_URL + self.SECTION_PATH[section] def is_browser_on_page(self): stripped_url = self.browser.url.replace(BASE_URL, "") css_sel = self.EXPECTED_CSS[stripped_url] return self.is_css_present(css_sel) @classmethod def sections(cls): return cls.SECTION_PATH.keys()
Python
0
4d942291734641bbdd6a71e16167fefca37a68e7
Fix default config file path on auto creation
passpie/config.py
passpie/config.py
import copy import logging import os import yaml DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.passpierc') DB_DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.passpie') DEFAULT_CONFIG = { 'path': DB_DEFAULT_PATH, 'short_commands': False, 'key_length': 4096, 'genpass_length': 32, 'genpass_symbols': "_-#|+=", 'table_format': 'fancy_grid', 'headers': ['name', 'login', 'password', 'comment'], 'colors': {'name': 'yellow', 'login': 'green'}, 'repo': True, 'status_repeated_passwords_limit': 5, 'copy_timeout': 0, 'extension': '.pass', 'recipient': None } def read_config(path): try: with open(path) as config_file: content = config_file.read() config = yaml.load(content) except IOError: logging.debug('config file "%s" not found' % path) return {} except yaml.scanner.ScannerError as e: logging.error('Malformed user configuration file {}'.format(e)) return {} return config def create(path, default=True, **kwargs): config_path = os.path.join(os.path.expanduser(path), '.passpierc') with open(config_path, 'w') as config_file: if default: config_file.write(yaml.dump(DEFAULT_CONFIG, default_flow_style=False)) else: config_file.write(yaml.dump(kwargs, default_flow_style=False)) def load(): if not os.path.isfile(DEFAULT_CONFIG_PATH): create(DEFAULT_CONFIG_PATH, default=True) global_config = read_config(DEFAULT_CONFIG_PATH) config = copy.deepcopy(DEFAULT_CONFIG) config.update(global_config) local_config = read_config(os.path.join(config['path'], '.passpierc')) config.update(local_config) return config
import copy import logging import os import yaml DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.passpierc') DB_DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.passpie') DEFAULT_CONFIG = { 'path': DB_DEFAULT_PATH, 'short_commands': False, 'key_length': 4096, 'genpass_length': 32, 'genpass_symbols': "_-#|+=", 'table_format': 'fancy_grid', 'headers': ['name', 'login', 'password', 'comment'], 'colors': {'name': 'yellow', 'login': 'green'}, 'repo': True, 'status_repeated_passwords_limit': 5, 'copy_timeout': 0, 'extension': '.pass', 'recipient': None } def read_config(path): try: with open(path) as config_file: content = config_file.read() config = yaml.load(content) except IOError: logging.debug('config file "%s" not found' % path) return {} except yaml.scanner.ScannerError as e: logging.error('Malformed user configuration file {}'.format(e)) return {} return config def create(path, default=True, **kwargs): config_path = os.path.join(os.path.expanduser(path), '.passpierc') with open(config_path, 'w') as config_file: if default: config_file.write(yaml.dump(DEFAULT_CONFIG, default_flow_style=False)) else: config_file.write(yaml.dump(kwargs, default_flow_style=False)) def load(): if not os.path.isfile(DEFAULT_CONFIG_PATH): create(DEFAULT_CONFIG['path'], default=True) global_config = read_config(DEFAULT_CONFIG_PATH) config = copy.deepcopy(DEFAULT_CONFIG) config.update(global_config) local_config = read_config(os.path.join(config['path'], '.passpierc')) config.update(local_config) return config
Python
0.000001
2559fa50a80ac90f36c3aed251bf397f1af83dd2
bump version to 0.1b2
paste/__init__.py
paste/__init__.py
name = 'paste' version = '0.1b2'
name = 'paste' version = '0.1b1'
Python
0.000001
cf716e0d35df2a76c57c0b08a027c092ff60fd47
Refactor `.open(...)` for clarity
pdfplumber/pdf.py
pdfplumber/pdf.py
import itertools import logging import pathlib from pdfminer.layout import LAParams from pdfminer.pdfdocument import PDFDocument from pdfminer.pdfinterp import PDFResourceManager from pdfminer.pdfpage import PDFPage from pdfminer.pdfparser import PDFParser from pdfminer.psparser import PSException from .container import Container from .page import Page from .utils import resolve_and_decode logger = logging.getLogger(__name__) class PDF(Container): cached_properties = Container.cached_properties + ["_pages"] def __init__( self, stream, pages=None, laparams=None, password="", strict_metadata=False, ): self.laparams = None if laparams is None else LAParams(**laparams) self.stream = stream self.pages_to_parse = pages self.doc = PDFDocument(PDFParser(stream), password=password) self.rsrcmgr = PDFResourceManager() self.metadata = {} for info in self.doc.info: self.metadata.update(info) for k, v in self.metadata.items(): try: self.metadata[k] = resolve_and_decode(v) except Exception as e: if strict_metadata: # Raise an exception since unable to resolve the metadata value. raise # This metadata value could not be parsed. Instead of failing the PDF # read, treat it as a warning only if `strict_metadata=False`. logger.warning( f'[WARNING] Metadata key "{k}" could not be parsed due to ' f"exception: {str(e)}" ) @classmethod def open(cls, path_or_fp, **kwargs): is_path = isinstance(path_or_fp, (str, pathlib.Path)) fp = open(path_or_fp, "rb") if is_path else path_or_fp try: inst = cls(fp, **kwargs) except PSException: if is_path: fp.close() raise if is_path: inst.close_file = fp.close return inst @property def pages(self): if hasattr(self, "_pages"): return self._pages doctop = 0 pp = self.pages_to_parse self._pages = [] for i, page in enumerate(PDFPage.create_pages(self.doc)): page_number = i + 1 if pp is not None and page_number not in pp: continue p = Page(self, page, page_number=page_number, initial_doctop=doctop) self._pages.append(p) doctop += p.height return self._pages @property def objects(self): if hasattr(self, "_objects"): return self._objects all_objects = {} for p in self.pages: for kind in p.objects.keys(): all_objects[kind] = all_objects.get(kind, []) + p.objects[kind] self._objects = all_objects return self._objects @property def annots(self): gen = (p.annots for p in self.pages) return list(itertools.chain(*gen)) @property def hyperlinks(self): gen = (p.hyperlinks for p in self.pages) return list(itertools.chain(*gen))
import itertools import logging import pathlib from pdfminer.layout import LAParams from pdfminer.pdfdocument import PDFDocument from pdfminer.pdfinterp import PDFResourceManager from pdfminer.pdfpage import PDFPage from pdfminer.pdfparser import PDFParser from pdfminer.psparser import PSException from .container import Container from .page import Page from .utils import resolve_and_decode logger = logging.getLogger(__name__) class PDF(Container): cached_properties = Container.cached_properties + ["_pages"] def __init__( self, stream, pages=None, laparams=None, password="", strict_metadata=False, ): self.laparams = None if laparams is None else LAParams(**laparams) self.stream = stream self.pages_to_parse = pages self.doc = PDFDocument(PDFParser(stream), password=password) self.rsrcmgr = PDFResourceManager() self.metadata = {} for info in self.doc.info: self.metadata.update(info) for k, v in self.metadata.items(): try: self.metadata[k] = resolve_and_decode(v) except Exception as e: if strict_metadata: # Raise an exception since unable to resolve the metadata value. raise # This metadata value could not be parsed. Instead of failing the PDF # read, treat it as a warning only if `strict_metadata=False`. logger.warning( f'[WARNING] Metadata key "{k}" could not be parsed due to ' f"exception: {str(e)}" ) @classmethod def open(cls, path_or_fp, **kwargs): if isinstance(path_or_fp, (str, pathlib.Path)): fp = open(path_or_fp, "rb") try: inst = cls(fp, **kwargs) except PSException: fp.close() raise inst.close_file = fp.close return inst else: return cls(path_or_fp, **kwargs) @property def pages(self): if hasattr(self, "_pages"): return self._pages doctop = 0 pp = self.pages_to_parse self._pages = [] for i, page in enumerate(PDFPage.create_pages(self.doc)): page_number = i + 1 if pp is not None and page_number not in pp: continue p = Page(self, page, page_number=page_number, initial_doctop=doctop) self._pages.append(p) doctop += p.height return self._pages @property def objects(self): if hasattr(self, "_objects"): return self._objects all_objects = {} for p in self.pages: for kind in p.objects.keys(): all_objects[kind] = all_objects.get(kind, []) + p.objects[kind] self._objects = all_objects return self._objects @property def annots(self): gen = (p.annots for p in self.pages) return list(itertools.chain(*gen)) @property def hyperlinks(self): gen = (p.hyperlinks for p in self.pages) return list(itertools.chain(*gen))
Python
0
a9808c822e598fd17148b8fc4063ea11f0a270e9
Add bawler specific exception
pg_bawler/core.py
pg_bawler/core.py
''' ============== pg_bawler.core ============== Base classes for LISTEN / NOTIFY. Postgresql documentation for `LISTEN <https://www.postgresql.org/docs/current/static/sql-listen.html>`_ / `NOTIFY <https://www.postgresql.org/docs/current/static/sql-notify.html>`_. ''' import asyncio import logging import aiopg LOGGER = logging.getLogger(name='pg_bawler.core') class PgBawlerException(Exception): ''' Base class for all ``pg_bawler`` related failures ''' def cache_async_def(func): cache_attr_name = '_cache_async_def_{func.__name__}'.format(func=func) async def _cache_method(self, *args, **kwargs): if not hasattr(self, cache_attr_name): setattr(self, cache_attr_name, await func(self, *args, **kwargs)) return getattr(self, cache_attr_name) # simulate functools.update_wrapper _cache_method.__name__ = func.__name__ _cache_method.__doc__ = func.__doc__ _cache_method.__module__ = func.__module__ # save cache_attr_name on function # so delattr(self, func.cache_attr_name) will clear the cache _cache_method.cache_attr_name = cache_attr_name return _cache_method class BawlerBase: ''' Base ``pg_bawler`` class with convenience methods around ``aiopg``. ''' def __init__(self, connection_params, *, loop=None): self.connection_params = connection_params self._connection = None self.loop = asyncio.get_event_loop() if loop is None else loop @cache_async_def async def pg_pool(self): return await aiopg.create_pool( loop=self.loop, **self.connection_params) @cache_async_def async def pg_connection(self): return await (await self.pg_pool()).acquire() async def drop_connection(self): ''' Drops current connection Next call to the ``self.pg_connection`` will acquire new connection from pool. Use this method to drop dead connections on server restart. ''' if hasattr(self, self.pg_connection.cache_attr_name): pg_conn = (await self.pg_connection()) pg_conn.close() await (await self.pg_pool()).release(pg_conn) # clear cached connection property (cache_async_def) delattr(self, self.pg_connection.cache_attr_name) async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.drop_connection()
''' ============== pg_bawler.core ============== Base classes for LISTEN / NOTIFY. Postgresql documentation for `LISTEN <https://www.postgresql.org/docs/current/static/sql-listen.html>`_ / `NOTIFY <https://www.postgresql.org/docs/current/static/sql-notify.html>`_. ''' import asyncio import logging import aiopg LOGGER = logging.getLogger(name='pg_bawler.core') def cache_async_def(func): cache_attr_name = '_cache_async_def_{func.__name__}'.format(func=func) async def _cache_method(self, *args, **kwargs): if not hasattr(self, cache_attr_name): setattr(self, cache_attr_name, await func(self, *args, **kwargs)) return getattr(self, cache_attr_name) # simulate functools.update_wrapper _cache_method.__name__ = func.__name__ _cache_method.__doc__ = func.__doc__ _cache_method.__module__ = func.__module__ # save cache_attr_name on function # so delattr(self, func.cache_attr_name) will clear the cache _cache_method.cache_attr_name = cache_attr_name return _cache_method class BawlerBase: ''' Base ``pg_bawler`` class with convenience methods around ``aiopg``. ''' def __init__(self, connection_params, *, loop=None): self.connection_params = connection_params self._connection = None self.loop = asyncio.get_event_loop() if loop is None else loop @cache_async_def async def pg_pool(self): return await aiopg.create_pool( loop=self.loop, **self.connection_params) @cache_async_def async def pg_connection(self): return await (await self.pg_pool()).acquire() async def drop_connection(self): ''' Drops current connection Next call to the ``self.pg_connection`` will acquire new connection from pool. Use this method to drop dead connections on server restart. ''' if hasattr(self, self.pg_connection.cache_attr_name): pg_conn = (await self.pg_connection()) pg_conn.close() await (await self.pg_pool()).release(pg_conn) # clear cached connection property (cache_async_def) delattr(self, self.pg_connection.cache_attr_name) async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.drop_connection()
Python
0.000001
d350c180606060e9539c12d7d49eed4d6802ac8b
Bump to 1.1.5
pilkit/pkgmeta.py
pilkit/pkgmeta.py
__title__ = 'pilkit' __author__ = 'Matthew Tretter' __version__ = '1.1.5' __license__ = 'BSD' __all__ = ['__title__', '__author__', '__version__', '__license__']
__title__ = 'pilkit' __author__ = 'Matthew Tretter' __version__ = '1.1.4' __license__ = 'BSD' __all__ = ['__title__', '__author__', '__version__', '__license__']
Python
0.000205
b1bd2acbe756922a4cfa2b3a307d60b7e89734c2
Update command.py
pipwin/command.py
pipwin/command.py
# -*- coding: utf-8 -*- """pipwin installs compiled python binaries on windows provided by Christoph Gohlke Usage: pipwin install (<package> | [-r=<file> | --file=<file>]) pipwin uninstall <package> pipwin download (<package> | [-r=<file> | --file=<file>]) [-d=<dest> | --dest=<dest>] pipwin search <package> pipwin list pipwin refresh [--log=<log>] pipwin (-h | --help) pipwin (-v | --version) Options: -h --help Show this screen. -v --version Show version. -r=<file> --file=<file> File with list of package names. -d=<dest> --dest=<dest> Download packages into <dest>. """ from docopt import docopt import sys import platform import logging from warnings import warn from . import pipwin, __version__ from packaging.requirements import Requirement def _package_names(args): if args["--file"]: with open(args["--file"], 'r') as fid: for package in fid.readlines(): if package and not package.startswith('#'): yield Requirement(package.strip()) elif not args["<package>"]: print("Provide a package name") sys.exit(0) else: yield Requirement(args["<package>"].lower()) return def _print_unresolved_match_msg(package, matches): if len(matches) > 0: print("Did you mean any of these ?\n") print(" * " + "\n * ".join(matches)) print("") else: print("Package `{}` not found".format(package.name)) print("Try `pipwin refresh`") def main(): """ Command line entry point """ args = docopt(__doc__, version="pipwin v{}".format(__version__)) # Warn if not on windows if platform.system() != "Windows": warn("Found a non Windows system. Package installation might not work.") # Handle refresh if args["refresh"]: log_level = args.get("--log", None) if log_level: log_level = log_level.upper() try: logging.basicConfig(level=log_level) except ValueError: print("Log level should be DEBUG, INFO, WARNING or ERROR") pipwin.refresh() sys.exit(0) cache = pipwin.PipwinCache() # Handle list if args["list"]: cache.print_list() sys.exit(0) for package in _package_names(args): exact_match, matches = cache.search(package) if not exact_match: _print_unresolved_match_msg(package, matches) if args["--file"]: # We just skip this specific package and work on the others continue else: sys.exit(1) print("Package `{}` found in cache".format(package)) # Handle install/uninstall/download if args["install"]: cache.install(package) elif args["uninstall"]: cache.uninstall(package) elif args["download"]: cache.download(package, dest=args["--dest"])
# -*- coding: utf-8 -*- """pipwin installs compiled python binaries on windows provided by Christoph Gohlke Usage: pipwin install (<package> | [-r=<file> | --file=<file>]) pipwin uninstall <package> pipwin download (<package> | [-r=<file> | --file=<file>]) [-d=<dest> | --dest=<dest>] pipwin search <package> pipwin list pipwin refresh [--log=<log>] pipwin (-h | --help) pipwin (-v | --version) Options: -h --help Show this screen. -v --version Show version. -r=<file> --file=<file> File with list of package names. -d=<dest> --dest=<dest> Download packages into <dest>. """ from docopt import docopt import sys import platform import logging from warnings import warn from . import pipwin, __version__ from packaging.requirements import Requirement def _package_names(args): if args["--file"]: with open(args["--file"], 'r') as fid: for package in fid.readlines(): if package and not package.startswith('#'): yield Requirement(package.strip()) elif not args["<package>"]: print("Provide a package name") sys.exit(0) else: yield Requirement(args["<package>"]) return def _print_unresolved_match_msg(package, matches): if len(matches) > 0: print("Did you mean any of these ?\n") print(" * " + "\n * ".join(matches)) print("") else: print("Package `{}` not found".format(package.name)) print("Try `pipwin refresh`") def main(): """ Command line entry point """ args = docopt(__doc__, version="pipwin v{}".format(__version__)) # Warn if not on windows if platform.system() != "Windows": warn("Found a non Windows system. Package installation might not work.") # Handle refresh if args["refresh"]: log_level = args.get("--log", None) if log_level: log_level = log_level.upper() try: logging.basicConfig(level=log_level) except ValueError: print("Log level should be DEBUG, INFO, WARNING or ERROR") pipwin.refresh() sys.exit(0) cache = pipwin.PipwinCache() # Handle list if args["list"]: cache.print_list() sys.exit(0) for package in _package_names(args): exact_match, matches = cache.search(package) if not exact_match: _print_unresolved_match_msg(package, matches) if args["--file"]: # We just skip this specific package and work on the others continue else: sys.exit(1) print("Package `{}` found in cache".format(package)) # Handle install/uninstall/download if args["install"]: cache.install(package) elif args["uninstall"]: cache.uninstall(package) elif args["download"]: cache.download(package, dest=args["--dest"])
Python
0.000002
3ce78ad88d8c963dfb819b323b40b2415d8624b2
Split create user feature into two functions
api.py
api.py
#!/usr/bin/env python """ Copyright 2016 Brian Quach Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-conference/blob/master/LICENSE) # noqa """ import endpoints from protorpc import remote from resource import StringMessage from resource import USER_REQUEST @endpoints.api(name='poker', version='v1') class FiveCardPokerAPI(remote.Service): """An API for a two-player five card poker game.""" @endpoints.method( request_message=USER_REQUEST, response_message=StringMessage, path='user/create', name='createUser', http_method='POST' ) def create_user(self, request): """Create a player.""" return self._create_user(request) def _create_user(request): """Create a player. Username must be unique. Code Citation: https://github.com/udacity/FSND-P4-Design-A-Game/blob/master/Skeleton%20Project%20Guess-a-Number/api.py # noqa """ if User.query(User.name == request.user_name).get(): raise endpoints.ConflictException( 'A User with that name already exists!' ) user = User(name=request.user_name, email=request.email) user.put() return StringMessage(message='User {} created!'.format( request.user_name)) api = endpoints.api_server([FiveCardPokerAPI])
#!/usr/bin/env python """ Copyright 2016 Brian Quach Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-conference/blob/master/LICENSE) # noqa """ import endpoints from protorpc import remote from resource import StringMessage from resource import USER_REQUEST @endpoints.api(name='poker', version='v1') class FiveCardPokerAPI(remote.Service): """An API for a two-player five card poker game.""" # Players are delt a five card hand and each player has one opportunity, # starting with player one, to replace up to 5 cards in their hand with new # cards from the top of the deck. # Once each player has finished replacing their cards, each hand is then # revealed. The player with the highest poker hand wins. # Username must be unique. # Code Citation: # https://github.com/udacity/FSND-P4-Design-A-Game/blob/master/Skeleton%20Project%20Guess-a-Number/api.py # noqa @endpoints.method( request_message=USER_REQUEST, response_message=StringMessage, path='user/create', name='createUser', http_method='POST' ) def create_user(self, request): """Create a User.""" if User.query(User.name == request.user_name).get(): raise endpoints.ConflictException( 'A User with that name already exists!' ) user = User(name=request.user_name, email=request.email) user.put() return StringMessage(message='User {} created!'.format( request.user_name)) api = endpoints.api_server([FiveCardPokerAPI])
Python
0
ce8ba26877505481795edd024c3859b14c548ffd
Refactor comics.py
plugins/comics.py
plugins/comics.py
# Copyright 2017 Starbot Discord Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''Get comics from xkcd.''' import json from api import command, caching, message, plugin def onInit(plugin_in): '''List commands for plugin.''' xkcd_command = command.command(plugin_in, 'xkcd', shortdesc='Posts the latest XKCD, or by specific ID') return plugin.plugin(plugin_in, 'comics', [xkcd_command]) async def onCommand(message_in): '''Run plugin commands.''' if message_in.command == 'xkcd': if message_in.body: try: if int(message_in.body) < 0: return message.message(body="ID `{}` is not a valid ID".format(message_in.body)) except ValueError: return message.message(body='Input of `{}` is not a valid number'.format(message_in.body)) data = json.loads(caching.getJson("https://xkcd.com/{}/info.0.json".format(message_in.body.strip()), caller='xkcd', customName='{}.json'.format(message_in.body.strip()))) else: data = json.loads(caching.getJson("https://xkcd.com/info.0.json", caller='xkcd', save=False)) caching.downloadToCache(data['img'], '{}.png'.format(data['num']), caller='xkcd') return message.message(body='**{}/{}/{} - {}**\n_{}_'.format(data['month'], data['day'], data['year'], data['safe_title'], data['alt']), file='cache/xkcd_{}.png'.format(data['num']))
# Copyright 2017 Starbot Discord Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from api import command, caching, message, plugin def onInit(plugin_in): xkcd_command = command.command(plugin_in, 'xkcd', shortdesc='Posts the latest XKCD, or by specific ID') return plugin.plugin(plugin_in, 'comics', [xkcd_command]) async def onCommand(message_in): if message_in.command == 'xkcd': if message_in.body != '': try: if int(message_in.body) < 0: return message.message(body="ID `{}` is not a valid ID".format(message_in.body)) except: return message.message(body='Input of `{}` is not a valid number'.format(message_in.body)) data = json.loads(caching.getJson("https://xkcd.com/{}/info.0.json".format(message_in.body.strip()), caller='xkcd', customName='{}.json'.format(message_in.body.strip()))) else: data = json.loads(caching.getJson("https://xkcd.com/info.0.json", caller='xkcd', save=False)) caching.downloadToCache(data['img'], '{}.png'.format(data['num']), caller='xkcd') return message.message(body='**{}/{}/{} - {}**\n_{}_'.format(data['month'], data['day'], data['year'], data['safe_title'], data['alt']), file='cache/xkcd_{}.png'.format(data['num']))
Python
0.000001
318e6b5fd2382766c065574f6b202fd09e68cf6e
increment version #
pmagpy/version.py
pmagpy/version.py
""" Module contains current pmagpy version number. Version number is displayed by GUIs and used by setuptools to assign number to pmagpy/pmagpy-cli. """ "pmagpy-3.11.1" version = 'pmagpy-3.11.1'
""" Module contains current pmagpy version number. Version number is displayed by GUIs and used by setuptools to assign number to pmagpy/pmagpy-cli. """ "pmagpy-3.11.0" version = 'pmagpy-3.11.0'
Python
0.000001
7150413cccbf8f812b3fd4a1fc2b82a4020aed9f
fix heroku postgresql path to allow sqlalchemy upgrade
app.py
app.py
from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_compress import Compress from flask_debugtoolbar import DebugToolbarExtension import sentry_sdk from sentry_sdk.integrations.flask import FlaskIntegration from sqlalchemy.pool import NullPool import logging import sys import os import requests HEROKU_APP_NAME = "paperbuzz-api" # set up logging # see http://wiki.pylonshq.com/display/pylonscookbook/Alternative+logging+configuration logging.basicConfig( stream=sys.stdout, level=logging.DEBUG, format="%(name)s - %(message)s" ) logger = logging.getLogger("paperbuzz") libraries_to_mum = [ "requests.packages.urllib3", "requests_oauthlib", "stripe", "oauthlib", "boto", "newrelic", "RateLimiter", ] for a_library in libraries_to_mum: the_logger = logging.getLogger(a_library) the_logger.setLevel(logging.WARNING) the_logger.propagate = True requests.packages.urllib3.disable_warnings() # error reporting with sentry sentry_sdk.init( dsn=os.environ.get('SENTRY_DSN'), integrations=[FlaskIntegration()] ) app = Flask(__name__) # database stuff app.config[ "SQLALCHEMY_TRACK_MODIFICATIONS" ] = True # as instructed, to suppress warning db_uri = os.getenv("DATABASE_URL") if db_uri.startswith("postgres://"): db_uri = db_uri.replace("postgres://", "postgresql://", 1) # temp heroku sqlalchemy fix app.config["SQLALCHEMY_DATABASE_URI"] = db_uri app.config["SQLALCHEMY_ECHO"] = os.getenv("SQLALCHEMY_ECHO", False) == "True" # from http://stackoverflow.com/a/12417346/596939 class NullPoolSQLAlchemy(SQLAlchemy): def apply_driver_hacks(self, app, info, options): options["poolclass"] = NullPool return super(NullPoolSQLAlchemy, self).apply_driver_hacks(app, info, options) db = NullPoolSQLAlchemy(app) # do compression. has to be above flask debug toolbar so it can override this. compress_json = os.getenv("COMPRESS_DEBUG", "False") == "True" # set up Flask-DebugToolbar if os.getenv("FLASK_DEBUG", False) == "True": logger.info("Setting app.debug=True; Flask-DebugToolbar will display") compress_json = False app.debug = True app.config["DEBUG"] = True app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False app.config["SQLALCHEMY_RECORD_QUERIES"] = True app.config["SECRET_KEY"] = os.getenv("SECRET_KEY") toolbar = DebugToolbarExtension(app) # gzip responses Compress(app) app.config["COMPRESS_DEBUG"] = compress_json
from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_compress import Compress from flask_debugtoolbar import DebugToolbarExtension import sentry_sdk from sentry_sdk.integrations.flask import FlaskIntegration from sqlalchemy.pool import NullPool import logging import sys import os import requests HEROKU_APP_NAME = "paperbuzz-api" # set up logging # see http://wiki.pylonshq.com/display/pylonscookbook/Alternative+logging+configuration logging.basicConfig( stream=sys.stdout, level=logging.DEBUG, format="%(name)s - %(message)s" ) logger = logging.getLogger("paperbuzz") libraries_to_mum = [ "requests.packages.urllib3", "requests_oauthlib", "stripe", "oauthlib", "boto", "newrelic", "RateLimiter", ] for a_library in libraries_to_mum: the_logger = logging.getLogger(a_library) the_logger.setLevel(logging.WARNING) the_logger.propagate = True requests.packages.urllib3.disable_warnings() # error reporting with sentry sentry_sdk.init( dsn=os.environ.get('SENTRY_DSN'), integrations=[FlaskIntegration()] ) app = Flask(__name__) # database stuff app.config[ "SQLALCHEMY_TRACK_MODIFICATIONS" ] = True # as instructed, to suppress warning app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL") app.config["SQLALCHEMY_ECHO"] = os.getenv("SQLALCHEMY_ECHO", False) == "True" # from http://stackoverflow.com/a/12417346/596939 class NullPoolSQLAlchemy(SQLAlchemy): def apply_driver_hacks(self, app, info, options): options["poolclass"] = NullPool return super(NullPoolSQLAlchemy, self).apply_driver_hacks(app, info, options) db = NullPoolSQLAlchemy(app) # do compression. has to be above flask debug toolbar so it can override this. compress_json = os.getenv("COMPRESS_DEBUG", "False") == "True" # set up Flask-DebugToolbar if os.getenv("FLASK_DEBUG", False) == "True": logger.info("Setting app.debug=True; Flask-DebugToolbar will display") compress_json = False app.debug = True app.config["DEBUG"] = True app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False app.config["SQLALCHEMY_RECORD_QUERIES"] = True app.config["SECRET_KEY"] = os.getenv("SECRET_KEY") toolbar = DebugToolbarExtension(app) # gzip responses Compress(app) app.config["COMPRESS_DEBUG"] = compress_json
Python
0
b8bf769d55a61f9d29b15c4b657d9df293045304
convert to int
app.py
app.py
#!/usr/bin/env python from flask import Flask, render_template, Response, request from i2c import I2C from camera_pi import Camera RobotArduino = I2C(); #The robot's arduino controller app = Flask(__name__, template_folder='site') @app.route('/') def index(): """Main page: controller + video stream""" return render_template('index.html') @app.route('/action', methods=['POST']) def action(): """Handle button presses - Send commands to the robot""" val = request.form.get('command') print("Sending ["+str(val)+"] To Arduino") RobotArduino.writeNumber(int(val)) return ('',204) #no response def gen(camera): """Video streaming generator function.""" while True: frame = camera.get_frame() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') @app.route('/video_feed') def video_feed(): """Video streaming route. Put this in the src attribute of an img tag.""" return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True, threaded=True, port=8000)
#!/usr/bin/env python from flask import Flask, render_template, Response, request from i2c import I2C from camera_pi import Camera RobotArduino = I2C(); #The robot's arduino controller app = Flask(__name__, template_folder='site') @app.route('/') def index(): """Main page: controller + video stream""" return render_template('index.html') @app.route('/action', methods=['POST']) def action(): """Handle button presses - Send commands to the robot""" val = request.form.get('command') print("Sending ["+str(val)+"] To Arduino") RobotArduino.writeNumber(val) return ('',204) #no response def gen(camera): """Video streaming generator function.""" while True: frame = camera.get_frame() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') @app.route('/video_feed') def video_feed(): """Video streaming route. Put this in the src attribute of an img tag.""" return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True, threaded=True, port=8000)
Python
1
c432ae2dc25b2af77b3f57d610b111a24919d987
Add collision detection on paste creation
app.py
app.py
__author__ = 'zifnab' from flask import Flask, redirect, request, render_template, flash, current_app as app, abort from mongoengine import connect from flask_admin import Admin from flask_debugtoolbar import DebugToolbarExtension from flask_login import LoginManager, login_user, login_required, logout_user, current_user from flask_wtf import Form from wtforms.fields import * from wtforms.validators import * from passlib.hash import sha512_crypt from datetime import datetime, timedelta, date import database import arrow from util import random_string app = Flask(__name__) with app.app_context(): import auth from config import local_config app.config.from_object(local_config) db = connect('zifbin') admin = Admin(app) import admin toolbar = DebugToolbarExtension(app) class PasteForm(Form): text = TextAreaField('Paste Here', validators=[Required()]) expiration = SelectField('Expiration', choices=[('0', 'Expires Never'), ('1', 'Expires In Fifteen Minutes'), ('2', 'Expires In Thirty Minutes'), ('3', 'Expires In One Hour'), ('4', 'Expires In Six Hours'), ('5', 'Expires In One Day')], default='3') @app.route('/', methods=('POST', 'GET')) @app.route('/new', methods=('POST', 'GET')) def main(): form = PasteForm(request.form) if form.validate_on_submit(): times = { '0':None, '1':{'minutes':+15}, '2':{'minutes':+30}, '3':{'hours':+1}, '4':{'hours':+6}, '5':{'days':+1} } paste = database.Paste(paste=form.text.data) if (current_user.is_authenticated()): paste.user = current_user.to_dbref() paste.name = random_string() collision_check = database.Paste.objects(name__exact=paste.name).first() while collision_check is not None: paste.name = random_string() collision_check = database.Paste.objects(name__exact=paste.name).first() if times.get(form.expiration.data) is not None: paste.expire = arrow.utcnow().replace(**times.get(form.expiration.data)).datetime paste.save() return redirect('/{id}'.format(id=paste.name)) return render_template('new_paste.html', form=form) @app.route('/<string:id>') def get(id): paste = database.Paste.objects(name__exact=id).first() if paste is None: abort(404) elif paste.expire is not None and arrow.get(paste.expire) < arrow.utcnow(): abort(404) else: return render_template("paste.html", paste=paste, title=paste.id) app.debug = app.config['DEBUG'] def run(): app.run( host=app.config.get('HOST', None), port=app.config.get('PORT', None) )
__author__ = 'zifnab' from flask import Flask, redirect, request, render_template, flash, current_app as app, abort from mongoengine import connect from flask_admin import Admin from flask_debugtoolbar import DebugToolbarExtension from flask_login import LoginManager, login_user, login_required, logout_user, current_user from flask_wtf import Form from wtforms.fields import * from wtforms.validators import * from passlib.hash import sha512_crypt from datetime import datetime, timedelta, date import database import arrow from util import random_string app = Flask(__name__) with app.app_context(): import auth from config import local_config app.config.from_object(local_config) db = connect('zifbin') admin = Admin(app) import admin toolbar = DebugToolbarExtension(app) class PasteForm(Form): text = TextAreaField('Paste Here', validators=[Required()]) expiration = SelectField('Expiration', choices=[('0', 'Expires Never'), ('1', 'Expires In Fifteen Minutes'), ('2', 'Expires In Thirty Minutes'), ('3', 'Expires In One Hour'), ('4', 'Expires In Six Hours'), ('5', 'Expires In One Day')], default='3') @app.route('/', methods=('POST', 'GET')) @app.route('/new', methods=('POST', 'GET')) def main(): form = PasteForm(request.form) if form.validate_on_submit(): times = { '0':None, '1':{'minutes':+15}, '2':{'minutes':+30}, '3':{'hours':+1}, '4':{'hours':+6}, '5':{'days':+1} } paste = database.Paste(paste=form.text.data) if (current_user.is_authenticated()): paste.user = current_user.to_dbref() paste.name = random_string() if times.get(form.expiration.data) is not None: paste.expire = arrow.utcnow().replace(**times.get(form.expiration.data)).datetime paste.save() return redirect('/{id}'.format(id=paste.name)) return render_template('new_paste.html', form=form) @app.route('/<string:id>') def get(id): paste = database.Paste.objects(name__exact=id).first() if paste is None: abort(404) elif paste.expire is not None and arrow.get(paste.expire) < arrow.utcnow(): abort(404) else: return render_template("paste.html", paste=paste, title=paste.id) app.debug = app.config['DEBUG'] def run(): app.run( host=app.config.get('HOST', None), port=app.config.get('PORT', None) )
Python
0
038196b5bc478ff561b6f8031ecbcb37a765ba3e
Change to be more pep8 compliant.
bot.py
bot.py
import praw import urllib import cv2 import numpy as np from PIL import Image import time import getpass import re # Eye Classifier eyeData = "xml/eyes.xml" eyeClass = cv2.CascadeClassifier(eyeData) # Glasses Asset glasses = cv2.imread('assets/glasses.png', cv2.IMREAD_UNCHANGED) ratio = glasses.shape[1] / glasses.shape[0] # How much we are going to downscale image while processing it. DOWNSCALE = 4 foundImage = False # List of posts already processed. already_done = [] # Super secret Reddit password. password = getpass.getpass("Reddit password: ") def process_image(url, frame, eyeList): for eye in eyeList: x, y, w, h = [v * DOWNSCALE for v in eye] h = w / ratio y += h / 2 # resize glasses to a new var called small glasses smallglasses = cv2.resize(glasses, (w, h)) # the area you want to change bg = frame[y:y+h, x:x+w] bg *= np.atleast_3d(255 - smallglasses[:, :, 3])/255.0 bg += smallglasses[:, :, 0:3] * np.atleast_3d(smallglasses[:, :, 3]) # put the changed image back into the scene frame[y:y+h, x:x+w] = bg print("Found image. Writing image.") cv2.imwrite(url, frame) while True: foundImage = False r = praw.Reddit('/u/powderblock Glasses Bot') r.login('DealWithItbot', password) for post in r.get_subreddit('all').get_new(limit=20): if post not in already_done: already_done.append(post) if "imgur.com" in post.url and (".jpg" in post.url or ".png" in post.url): print(post.url) response = urllib.urlopen(str(post.url)) # load the image we want to detect features on # Convert rawImage to Mat filearray = np.asarray(bytearray(response.read()), dtype=np.uint8) frame = cv2.imdecode(filearray, cv2.CV_LOAD_IMAGE_UNCHANGED) if frame is None or frame.size is None: print("Error, couldn't load image, skipping.") # Skip to next image continue if frame.shape[0] > 5000 or frame.shape[1] > 5000: print("Image is too large, skipping.") continue if frame.shape[0] == 0 or frame.shape[1] == 0: print("Image has a width or height of 0, skipping.") continue minisize = (frame.shape[1]/DOWNSCALE,frame.shape[0]/DOWNSCALE) miniframe = cv2.resize(frame, minisize) eyes = eyeClass.detectMultiScale(miniframe) if len(eyes) > 0: print(str(post.url)) foundImage = True process_image(str(post.url), frame, eyes) if not foundImage: print("No image with eyes found.") time.sleep(30)
import praw import urllib import cv2, numpy as np from PIL import Image import time import getpass import re # Eye Classifier eyeData = "xml/eyes.xml" eyeClass = cv2.CascadeClassifier(eyeData) # Glasses Asset glasses = cv2.imread('assets/glasses.png', cv2.IMREAD_UNCHANGED) ratio = glasses.shape[1] / glasses.shape[0] # How much we are going to downscale image while processing it. DOWNSCALE = 4 foundImage = False # List of posts already processed. already_done = [] # Super secret Reddit password. password = getpass.getpass("Reddit password: ") def process_image(url, frame, eyeList): for eye in eyeList: x, y, w, h = [v * DOWNSCALE for v in eye] h = w / ratio y += h / 2 # resize glasses to a new var called small glasses smallglasses = cv2.resize(glasses, (w, h)) # the area you want to change bg = frame[y:y+h, x:x+w] bg *= np.atleast_3d(255 - smallglasses[:, :, 3])/255.0 bg += smallglasses[:, :, 0:3] * np.atleast_3d(smallglasses[:, :, 3]) # put the changed image back into the scene frame[y:y+h, x:x+w] = bg print("Found image. Writing image.") cv2.imwrite(url, frame) while True: foundImage = False r = praw.Reddit('/u/powderblock Glasses Bot') r.login('DealWithItbot', password) for post in r.get_subreddit('all').get_new(limit=20): if post not in already_done: already_done.append(post) if "imgur.com" in post.url and (".jpg" in post.url or ".png" in post.url): print(post.url) response = urllib.urlopen(str(post.url)) # load the image we want to detect features on # Convert rawImage to Mat filearray = np.asarray(bytearray(response.read()), dtype=np.uint8) frame = cv2.imdecode(filearray, cv2.CV_LOAD_IMAGE_UNCHANGED) if frame is None or frame.size is None: print("Error, couldn't load image, skipping.") # Skip to next image continue if frame.shape[0] > 5000 or frame.shape[1] > 5000: print("Image is too large, skipping.") continue if frame.shape[0] == 0 or frame.shape[1] == 0: print("Image has a width or height of 0, skipping.") continue minisize = (frame.shape[1]/DOWNSCALE,frame.shape[0]/DOWNSCALE) miniframe = cv2.resize(frame, minisize) eyes = eyeClass.detectMultiScale(miniframe) if len(eyes) > 0: print(str(post.url)) foundImage = True process_image(str(post.url), frame, eyes) if not foundImage: print("No image with eyes found.") time.sleep(30)
Python
0
f67807b0f2064e1c6374fe4c10ed87c7a9222426
mark all events after processing it
bot.py
bot.py
#! /usr/bin/env python from time import gmtime, strftime from foaas import foaas from diaspy_client import Client import re import urllib2 def log_write(text): f = open('bot.log', 'a') f.write(strftime("%a, %d %b %Y %H:%M:%S ", gmtime())) f.write(text) f.write('\n') f.close() client=Client() notify = client.notifications() for n in notify: if not n.unread: continue m = re.search('\shas\smentioned.+post\s([^\/]+)\s(.+)\.+$', str(n)) try: if hasattr(m, 'group'): command = m.group(2).replace(' ', '__') client.post(foaas(command)) except urllib2.URLError: log_write("ERROR: "+str(n)) # finally mark as read n.mark()
#! /usr/bin/env python from time import gmtime, strftime from foaas import foaas from diaspy_client import Client import re import urllib2 def log_write(text): f = open('bot.log', 'a') f.write(strftime("%a, %d %b %Y %H:%M:%S ", gmtime())) f.write(text) f.write('\n') f.close() client=Client() notify = client.notifications() for n in notify: if not n.unread: continue m = re.search('\shas\smentioned.+post\s([^\/]+)\s(.+)\.+$', str(n)) try: if hasattr(m, 'group'): command = m.group(2).replace(' ', '__') client.post(foaas(command)) # finally mark as read n.mark() except urllib2.URLError: log_write("ERROR: "+str(n))
Python
0