Column schema: commit (string, 40 chars), subject (string, 1-3.25k chars), old_file (string, 4-311 chars), new_file (string, 4-311 chars), old_contents (string, 0-26.3k chars), lang (3 classes), proba (float64, 0-1), diff (string, 0-7.82k chars).

commit | subject | old_file | new_file | old_contents | lang | proba | diff
---|---|---|---|---|---|---|---|
ca8dec97321fdf2ceee459b95c3d885edebca15b | Bump DeletionWatcher up to 20 minutes | deletionwatcher.py | deletionwatcher.py | import json
import requests
import time
import websocket
from bs4 import BeautifulSoup
from threading import Thread
from metasmoke import Metasmoke
from globalvars import GlobalVars
from datahandling import is_false_positive, is_ignored_post, get_post_site_id_link
class DeletionWatcher:
@classmethod
def update_site_id_list(self):
soup = BeautifulSoup(requests.get("http://meta.stackexchange.com/topbar/site-switcher/site-list").text)
site_id_dict = {}
for site in soup.findAll("a", attrs={"data-id": True}):
site_name = site["href"][2:]
site_id = site["data-id"]
site_id_dict[site_name] = site_id
GlobalVars.site_id_dict = site_id_dict
@classmethod
def check_websocket_for_deletion(self, post_site_id, post_url, timeout):
time_to_check = time.time() + timeout
post_id = post_site_id[0]
post_type = post_site_id[2]
if post_type == "answer":
question_id = str(get_post_site_id_link(post_site_id))
if question_id is None:
return
else:
question_id = post_id
post_site = post_site_id[1]
if post_site not in GlobalVars.site_id_dict:
return
site_id = GlobalVars.site_id_dict[post_site]
ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/")
ws.send(site_id + "-question-" + question_id)
while time.time() < time_to_check:
ws.settimeout(time_to_check - time.time())
try:
a = ws.recv()
except websocket.WebSocketTimeoutException:
t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
args=(post_url, False))
t_metasmoke.start()
return False
if a is not None and a != "":
try:
d = json.loads(json.loads(a)["data"])
except:
continue
if d["a"] == "post-deleted" and str(d["qId"]) == question_id and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or post_type == "question"):
t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
args=(post_url, True))
t_metasmoke.start()
return True
t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
args=(post_url, False))
t_metasmoke.start()
return False
@classmethod
def check_if_report_was_deleted(self, post_site_id, post_url, message):
was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 600)
if was_report_deleted:
try:
message.delete()
except:
pass
@classmethod
def post_message_if_not_deleted(self, post_site_id, post_url, message_text, room):
was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 300)
if not was_report_deleted and not is_false_positive(post_site_id[0:2]) and not is_ignored_post(post_site_id[0:2]):
room.send_message(message_text)
| Python | 0 | @@ -2776,9 +2776,10 @@
rl,
-6
+12
00)%0A
|
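
The `diff` cells (like the one just above, which bumps a 600-second wait to 1200 seconds to match the 20-minute subject line) appear to percent-escape characters that would break the flat layout: `%0A` for newline, `%22` for double quotes, `%5B`/`%5D` for brackets, `%7B`/`%7D` for braces. A minimal decoder sketch, assuming every escape is a plain two-digit hex code and that a literal `%` is itself stored escaped as `%25`:

```python
import re

def decode_diff_cell(cell: str) -> str:
    # Turn %XX escapes back into their characters.
    return re.sub(r'%([0-9A-Fa-f]{2})',
                  lambda m: chr(int(m.group(1), 16)),
                  cell)

print(decode_diff_cell('mapping%5Bval%5D = node%0A'))  # -> 'mapping[val] = node' plus a newline
```
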
2bcfccb3b4b0d6f79fd62d84e98495392b86795a | Remove print statements | mail/views.py | mail/views.py | from django import http
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.decorators import login_required
from mail import models as mail_api
from mailgun import api as mailgun_api
from mail.email import send_email
from sequence import models as sequence_model
import bleach
import datetime
import re
import requests
def _text_from_html(html):
expression = re.compile(r'<a.*?href="(?P<url>.*?)".*?>(?P<text>.*?)</a>')
# rewrite links
html = expression.sub(r'\2 ( \1 ) ', html)
# remove all HTML markup
return bleach.clean(html, tags=[], strip=True)
def _rewrite_links(html):
expression = re.compile(r'(?P<url>http://email.{}/c/.*?)[\"\' ]'.format(settings.MAILGUN_API_DOMAIN))
print (expression.pattern)
# for every link
while expression.search(html):
match = expression.search(html)
url = match.group('url')
print("Old url: {}".format(url))
try:
resp = requests.get(url, allow_redirects=False)
if resp.status_code != 302:
return resp
raise Exception('Mailgun URL did not redirect. Status code: {}. URL: {}. Headers: {}'.format(resp.status_code, resp.url, resp.headers))
new_url = resp.headers['location']
print("New url: {}".format(new_url))
html = html[:match.start('url')] + new_url + html[match.end('url'):]
except Exception as e:
print(e)
break;
return html
@login_required
def compose( request ):
if request.method == 'POST':
subject = request.POST.get('subject')
html_body = _rewrite_links(request.POST.get('body_text'))
text_body = _text_from_html(html_body)
tags = request.POST.get('tags')
sequence = 1
audience = 'individuals'
if request.POST.get('to', None):
sequence = int(request.POST.get('to').split('-')[1])
audience = request.POST.get('to').split('-')[0]
mail_api.save_email(subject, text_body, html_body, sequence, audience, tags)
return http.HttpResponseRedirect(
reverse('mail_schedule')
)
context = {
'sequences': sequence_model.get_all_sequences()
}
return render_to_response(
'mail/compose.html',
context,
context_instance=RequestContext(request)
)
@login_required
def edit( request, id ):
email_uri = mail_api.id2uri(id)
email = mail_api.get_email(email_uri)
if request.method == 'POST':
subject = request.POST.get('subject')
html_body = _rewrite_links(request.POST.get('body_text'))
text_body = _text_from_html(html_body)
tags = request.POST.get('tags')
sequence = int(request.POST.get('to').split('-')[1])
audience = request.POST.get('to').split('-')[0]
mail_api.update_email(email_uri, subject, text_body, html_body,
sequence, audience, tags)
return http.HttpResponseRedirect(reverse('mail_schedule'))
context = {
'sequences': sequence_model.get_all_sequences(),
'email': email,
}
return render_to_response(
'mail/compose.html',
context,
context_instance=RequestContext(request)
)
@login_required
def send_preview( request ):
""" ajax view to send preview email """
if request.method == 'POST':
subject = request.POST.get('subject')
html_body = _rewrite_links(request.POST.get('body_text'))
text_body = _text_from_html(html_body)
to_email = request.POST.get('test_email')
mailgun_api.send_email(to_email, settings.DEFAULT_FROM_EMAIL, subject, text_body, html_body)
return http.HttpResponse('')
raise Exception()
@login_required
def send( request, id ):
#TODO should require a POST
email_uri = mail_api.id2uri(id)
send_email(email_uri)
return http.HttpResponseRedirect(reverse('mail_schedule'))
@login_required
def delete( request, id ):
#TODO should require a POST
email_uri = mail_api.id2uri(id)
mail_api.delete_email(email_uri)
return http.HttpResponseRedirect(reverse('mail_schedule'))
@login_required
def schedule( request ):
context = {
'schedule': mail_api.get_emails()
}
return render_to_response('mail/schedule.html', context, context_instance=RequestContext(request))
@login_required
def schedule_email( request, id ):
email_uri = mail_api.id2uri(id)
date_text = request.POST.get('scheduled_date')
time_text = request.POST.get('scheduled_time')
if len(date_text) == 0:
return http.HttpResponse(_('Please choose a date.'), status=400)
if len(time_text) == 0:
return http.HttpResponse(_('Please choose a time.'), status=400)
date_text += time_text
dt = datetime.datetime.strptime(date_text, '%Y-%m-%d%H:%M')
if dt < datetime.datetime.utcnow():
return http.HttpResponse(_('Scheduled time is in the past'), status=400)
mail_api.schedule_email(email_uri, dt)
return http.HttpResponse('')
| Python | 0.001543 | @@ -936,39 +936,8 @@
IN))
-%0A print (expression.pattern)
%0A%0A
@@ -1067,49 +1067,8 @@
l')%0A
- print(%22Old url: %7B%7D%22.format(url))%0A
@@ -1407,57 +1407,8 @@
n'%5D%0A
- print(%22New url: %7B%7D%22.format(new_url))%0A
@@ -1484,16 +1484,16 @@
url'):%5D%0A
+
@@ -1519,29 +1519,8 @@
e:%0A
- print(e)%0A
|
38c0e0235a4c28a8b6627d1160efe318d95015bf | Revert "Simplifiquem la forma de mirar si l'attach es valid" | mailticket.py | mailticket.py | import email
import hashlib
import base64
import re
from email.header import decode_header
from email.utils import parseaddr
from email.utils import parsedate_tz, mktime_tz
import datetime
import settings
import logging
logger = logging.getLogger(__name__)
class MailTicket:
""" Classe que encapsula un mail que es convertira en un ticket """
def __init__(self,fitxer):
self.filtrar_attachments_per_nom=settings.get("filtrar_attachments_per_nom")
self.filtrar_attachments_per_hash=settings.get("filtrar_attachments_per_hash")
self.mails_no_ticket=settings.get("mails_no_ticket")
self.msg = email.message_from_file(fitxer)
        # We will lazily initialize these 2 properties in case some error occurs
self.body=None
self.subject=None
def tracta_body(self):
if not self.msg.is_multipart():
part=self.msg
body=self.codifica(part)
self.body=self.text2html(body)
else:
self.part_body=0
el_body_es_html=False
for part in self.msg.walk():
self.part_body=self.part_body+1
if part.get_content_type() in ['multipart/alternative']:
el_body_es_html=True
if part.get_content_type() in ['text/html']:
self.body=self.codifica(part)
break
if part.get_content_type() in ['text/plain'] and not el_body_es_html:
body=self.codifica(part)
self.body=self.text2html(body)
break
def codifica(self,part):
if part.get_content_charset()!=None:
s=unicode(part.get_payload(decode=True), part.get_content_charset(), "ignore")
else:
s=unicode(part.get_payload(decode=True))
        # This is because there may be non-printable characters that have to be filtered out.
        # We only accept line feeds, tabs and characters from 32 upwards
return "".join([x if ord(x)==9 or ord(x)==10 or ord(x)==13 or ord(x)>=32 else '' for x in s])
def nomes_ascii(self,s):
return "".join([x if ord(x)==9 or ord(x)==10 or ord(x)==13 or (ord(x)>=32 and ord(x)<=128) else '' for x in s])
def tracta_subject(self):
subject=self.msg['Subject']
if subject==None:
self.subject=u""
return
resultat=u""
fragments=decode_header(subject)
for fragment in fragments:
if fragment[1]==None:
resultat+=self.nomes_ascii(fragment[0])
else:
resultat+=" "+fragment[0].decode(fragment[1])
self.subject=resultat.replace('\n', ' ').replace('\r', '')
def get_header(self,header):
if header in ('From','Resent-From','Reply-To','Resent-To'): return self.get_email_header(header)
elif header in ('Subject'): return self.get_subject()
else: return self.msg[header]
def get_email_header(self,header):
email=parseaddr(self.msg[header])[1]
if len(email)==0: return None
return email.lower()
def get_from(self):
return self.get_email_header('From')
def get_resent_from(self):
return self.get_email_header('Resent-From')
def get_reply_to(self):
return self.get_email_header('Reply-To')
def get_date(self):
try:
d=self.msg['Date']
tt = parsedate_tz(d)
timestamp = mktime_tz(tt)
aux=datetime.datetime.fromtimestamp(timestamp)
return aux
except:
logger.debug("No puc parsejar la data!")
return None
def get_to(self):
to=parseaddr(self.msg['To'])[1]
try:
email=parseaddr(self.msg['Resent-To'])[1]
if email==None or len(email)==0:
email=to
except:
email=to
finally:
return email.lower()
def get_subject(self):
if self.subject==None:
self.tracta_subject()
return self.subject
def get_subject_ascii(self):
return self.get_subject().encode('ascii','ignore')
def get_body(self):
if self.body==None:
self.tracta_body()
return self.body
def text2html(self,text):
text=text.replace("<","<")
text=text.replace(">",">")
return "<br>\n".join(text.split("\n"))
def get_attachments(self):
if self.body==None:
self.tracta_body()
attachments=[]
if self.msg.is_multipart():
i=0
for part in self.msg.walk():
i=i+1
if (i>self.part_body) and self.comprovar_attachment_valid(part):
logger.debug("Part: %s" % part.get_content_type())
attachments.append(part)
return attachments
def comprovar_attachment_valid(self,attachment):
if attachment.is_multipart():
return False
filename=attachment.get_filename()
contingut=attachment.get_payload()
if filename!=None:
for f in self.filtrar_attachments_per_nom:
p=re.compile(f)
if p.match(filename):
return False
        # If it's very long it is surely valid, it won't be a signature!
if len(contingut)>1000000:
return True
        # Second part: check that it isn't a file banned by its hash
try:
hash_attachment=hashlib.md5(base64.b64decode(contingut)).hexdigest()
logger.info("Hash:"+hash_attachment)
return hash_attachment not in self.filtrar_attachments_per_hash
except:
logger.info("Tinc un attachment del que no puc calcular el hash")
return True
def te_attachments(self):
return len(self.get_attachments())>0
def cal_tractar(self):
if self.get_from() in self.mails_no_ticket: return False
if self.msg.get_content_type()=="multipart/report": return False
if "Return Receipt" in self.get_body(): return False
if "DELIVERY FAILURE" in self.get_subject(): return False
if "Informe de lectura" in self.get_subject(): return False
return True
def __str__(self):
return self.msg.__str__()
| Python | 0 | @@ -4309,16 +4309,46 @@
rt_body)
+ and (not part.is_multipart())
and sel
@@ -4573,69 +4573,8 @@
):%0D%0A
- if attachment.is_multipart():%0D%0A return False%0D%0A %0D%0A
|
cceb88b877b71f5c4659959055a4cd92847f0426 | Tweak startup/shutdown log entries to be more visible and informative | __main__.py | __main__.py | #!/usr/bin/env python3
# Run script for CrabBot
# A mess of config args and terminal polling code
#
# See -h or read the argparse setup for argument details
import argparse
import datetime
import logging
import os
import readline # Only for better terminal input support, eg. history
import sys
from tempfile import gettempdir # for PID file (for easier service management)
from threading import Thread
import crabbot.common
import crabbot.cogs.messages
import crabbot.cogs.quotes
import crabbot.cogs.voice # comment out to disable voice commands entirely
pid = str(os.getpid())
pidfile = gettempdir() + '/CrabBot.pid' # eg. so systemd's PIDFile can find a /tmp/CrabBot.pid
with open(pidfile, 'w') as temppidfile:
temppidfile.write(pid)
logging.basicConfig(filename='crabbot.log', level=logging.INFO) # Grr, ytdl doesn't log
logging.info("Starting crabbot at " + str(datetime.datetime.now()))
# Do argparse first so that -h can print and exit before anything else happens
parser = argparse.ArgumentParser(description='A silly Discord bot')
token_args = parser.add_mutually_exclusive_group(required=True)
token_args.add_argument('-t', '--token',
help="The bot user's login token. Use this or -f.")
token_args.add_argument('-f', '--file', type=argparse.FileType('r'),
help="A file with the bot user's login token as the first line. Use this or -t")
parser.add_argument('-p', '--prefix', default="!crab",
help="Command prefix the bot responds to")
parser.add_argument('--assets-path', default="assets/",
help="Path for general assets (ex. sir-places.txt)")
parser.add_argument('--memes-path', default="assets/memes",
help="Path for memes audio clips (and its filelist.txt)")
parser.add_argument('--quotes-path', default="../", # NOTE we write to this location, be careful where you put it
help="Path containing the quotes database. Will create quotes.sqlite3 if it does not exist.")
parser.add_argument('--use-libav', action='store_true',
help="Make Voice use Libav instead of FFmpeg")
parser.add_argument('--disable-voice', action='store_true',
help="Disable Voice commands (can be enabled later)")
args = parser.parse_args()
if args.file is not None:
login = args.file.readline().rstrip()
args.file.close()
else:
login = args.token
bot = crabbot.common.CrabBot(prefix=args.prefix)
def poll_terminal():
running = True
# TODO function dict
# TODO handle KeyboardInterrupt exception (cleans up console output)
while running:
term_input = input()
if term_input == "help":
# TODO print terminal command help
print("Uh, no. I'm gonna be annoying instead.")
# NOTE could use function.__doc__ and docstrings for function help
elif term_input == "quit":
# TODO figure out if it's possible to end discord.Client without KeyboardInterrupt
# Probably need to reimplement run() using start() with a different quit condition
# Could also use run() and just throw a KeyboardInterrupt or two.
# Ew...
# For now, tell user how to quit so we don't leave them in the dark
print("Disabling command input. Use ctrl+c to quit the bot.")
running = False
elif term_input.startswith("update_profile"):
profile_args = term_input.split(' ')
bot._update_profile(username=profile_args[1], avatar=profile_args[2])
elif term_input.startswith("disable_voice"):
logging.info("Disabling voice commands")
bot.remove_cog("Voice")
elif term_input.startswith("enable_voice"):
if "crabbot.cogs.voice" in sys.modules:
logging.info("Enabling voice commands")
bot.add_cog(crabbot.cogs.voice.Voice(bot, args.memes_path, args.use_libav))
else:
logging.info("Voice disabled in source. Add/uncomment import for crabbot.voice and relaunch.")
elif term_input.startswith("update_lists"):
bot.update_all_lists()
# Start polling thread as a daemon so the program exits without waiting if ex. the bot crashes
input_thread = Thread(target=poll_terminal, daemon=True)
input_thread.start()
bot.add_cog(crabbot.cogs.messages.Messages(bot, args.assets_path + "/messages"))
bot.add_cog(crabbot.cogs.quotes.Quotes(bot, args.quotes_path))
# Comment out import of voice to disable voice commands
if "crabbot.cogs.voice" in sys.modules and args.disable_voice is False:
bot.add_cog(crabbot.cogs.voice.Voice(bot, args.memes_path, args.use_libav))
# Blocking, must be last. See discord.py Client for more info.
bot.run(login)
# If it reaches here, CrabBot's probably logged out of Discord now
# (CrabBot doesn't log out if it's straight terminated)
logging.info("CrabBot has recieved a SIGINT and has now exited as intended")
print("CrabBot says goodbye")
# Cleanup pidfile
try:
os.remove(pidfile)
except:
pass # Don't try too hard to clean up
| Python | 0 | @@ -844,16 +844,44 @@
ng.info(
+%22________%5Cn%22 +%0A
%22Startin
@@ -882,21 +882,21 @@
tarting
-crabb
+CrabB
ot at %22
@@ -925,17 +925,209 @@
e.now())
-)
+ + %22%5Cn%22%0A %22--------%22) # Make it clear in the log when a new run starts%0A # TODO? Might want a delimiter that is easier to write, eg. for a log parsing script
%0A%0A# Do a
@@ -5212,17 +5212,92 @@
intended
-%22
+%5Cn%22 +%0A %22%E2%80%94%E2%80%94%E2%80%94%E2%80%94%E2%80%94 CrabBot exited at %22 + str(datetime.datetime.now())
)%0Aprint(
|
a492e805fa51940d746a1d251232bc4f13417165 | fix waftools/man.py to install manpages again. | waftools/man.py | waftools/man.py | import Common, Object, Utils, Node, Params
import sys, os
import gzip
from misc import copyobj
def gzip_func(task):
env = task.m_env
infile = task.m_inputs[0].abspath(env)
outfile = task.m_outputs[0].abspath(env)
input = open(infile, 'r')
output = gzip.GzipFile(outfile, mode='w')
output.write(input.read())
return 0
class manobj(copyobj):
def __init__(self, section=1, type='none'):
copyobj.__init__(self, type)
self.fun = gzip_func
self.files = []
self.section = section
def apply(self):
lst = self.to_list(self.source)
for file in lst:
node = self.path.find_source(file)
if not node: fatal('cannot find input file %s for processing' % file)
target = self.target
if not target or len(lst)>1: target = node.m_name
newnode = self.path.find_build(file+'.gz') #target?
if not newnode:
newnode = Node.Node(file+'.gz', self.path)
self.path.append_build(newnode)
task = self.create_task('copy', self.env, 8)
task.set_inputs(node)
task.set_outputs(newnode)
task.m_env = self.env
task.fun = self.fun
if Params.g_commands['install'] or Params.g_commands['uninstall']:
Common.install_files('MANDIR', 'man' + str(self.section), newnode.abspath(self.env))
def setup(env):
Object.register('man', manobj)
def detect(conf):
return 1
| Python | 0 | @@ -588,22 +588,21 @@
st(self.
-source
+files
)%0A
|
89c1b58da23cfe16e8e195c61313b818a6d5f890 | Add persist.py | darwin/persist.py | darwin/persist.py |
import joblib
from .version import __version__, VERSION
class PersistenceMixin(object):
"""
Mixin that adds joblib persistence load and save function to any class.
"""
@classmethod
def from_file(cls, objdump_path):
'''
Parameters
----------
objdump_path: str
Path to the object dump file.
Returns
-------
instance
New instance of an object from the pickle at the specified path.
'''
obj_version, object = joblib.load(objdump_path)
# Check that we've actually loaded a PersistenceMixin (or sub-class)
if not isinstance(object, cls):
raise ValueError(('The pickle stored at {} does not contain ' +
'a {} object.').format(objdump_path, cls))
# Check that versions are compatible. (Currently, this just checks
# that major versions match)
elif obj_version[0] == VERSION[0]:
if not hasattr(object, 'sampler'):
object.sampler = None
return learner
else:
raise ValueError(("{} stored in pickle file {} was created with version {} "
"of {}, which is incompatible with the current version "
"{}").format(cls, objdump_path, __name__,
'.'.join(obj_version), '.'.join(VERSION)))
def load(self, objdump_path):
'''Replace the current object instance with a saved object.
Parameters
----------
objdump_path: str
The path to the file to load.
'''
del self.__dict__
self.__dict__ = Learner.from_file(objdump_path).__dict__
def save(self, objdump_path):
'''Save the learner to a file.
Parameters
----------
objdump_path: str
The path to where you want to save the learner.
'''
# create the directory if it doesn't exist
learner_dir = os.path.dirname(objdump_path)
if not os.path.exists(learner_dir):
os.makedirs(learner_dir)
# write out the files
joblib.dump((VERSION, self), objdump_path)
| Python | 0.000001 | @@ -84,17 +84,16 @@
bject):%0A
-%0A
%22%22%22%0A
@@ -1081,23 +1081,22 @@
return
-learner
+object
%0A
@@ -1732,15 +1732,24 @@
_ =
-Learner
+PersistenceMixin
.fro
@@ -1833,23 +1833,22 @@
ave the
-learner
+object
to a fi
@@ -1851,16 +1851,17 @@
a file.%0A
+%0A
@@ -1967,23 +1967,22 @@
ave the
-learner
+object
.%0A
|
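
With the row's diff applied (`from_id` returning `object` rather than the undefined `learner`, and `load` going through `PersistenceMixin.from_file`), the mixin is meant to be subclassed; a hypothetical round trip, noting that `save()` also relies on an `import os` that the module above never performs:

```python
from darwin.persist import PersistenceMixin  # module path taken from the row above

class Learner(PersistenceMixin):
    # Hypothetical subclass purely for illustration.
    def __init__(self, sampler=None):
        self.sampler = sampler

model = Learner()
model.save('/tmp/learner.dump')                    # joblib-dumps (VERSION, model)
restored = Learner.from_file('/tmp/learner.dump')  # version-checked reload
```
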
dd369472eeb5199e19d4aa5bfb0adeb839dcaf97 | move X axis label | buildtimetrend/trend.py | buildtimetrend/trend.py | # vim: set expandtab sw=4 ts=4:
#
# Generates a trend (graph) from the buildtimes in buildtimes.xml
#
# Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
#
# This file is part of buildtime-trend
# <https://github.com/ruleant/buildtime-trend/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from lxml import etree
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
class Trend(object):
def __init__(self):
self.stages = {}
self.builds = []
def gather_data(self, result_file):
# load builtimes file
if os.path.isfile(result_file):
root_xml = etree.parse(result_file).getroot()
else:
print "File doesn't exist : %s" % result_file
return False
index = 0
# print content of buildtimes file
for build_xml in root_xml:
build_id = "#%d" % (index + 1)
build_summary = "Build ID : "
if build_xml.get('id') is None:
build_summary += "unknown"
else:
build_summary += build_xml.get('id')
build_summary += ", Job : "
if build_xml.get('job') is None:
build_id = build_xml.get('id')
build_summary += "unknown"
else:
build_summary += build_xml.get('job')
build_id = build_xml.get('job')
self.builds.append(build_id)
# add 0 to each existing stage, to make sure that
# the indexes of each value
# are correct, even if a stage does not exist in a build
# if a stage exists, the zero will be replaced by its duration
for stage in self.stages:
self.stages[stage].append(0)
# add duration of each stage to stages list
for build_child in build_xml:
if build_child.tag == 'stages':
build_summary += ", stages : " + str(len(build_child))
for stage in build_child:
if (stage.tag == 'stage' and
stage.get('name') is not None and
stage.get('duration') is not None):
if stage.get('name') in self.stages:
temp_dict = self.stages[stage.get('name')]
else:
# when a new stage is added,
# create list with zeros,
# one for each existing build
temp_dict = [0]*(index + 1)
temp_dict[index] = int(stage.get('duration'))
self.stages[stage.get('name')] = temp_dict
print build_summary
index += 1
return True
def generate(self, trend_file):
fig, axes = plt.subplots()
# add data
x_values = range(len(self.builds))
plots = plt.stackplot(x_values, self.stages.values())
plt.xticks(x_values, self.builds, rotation=45, size=10)
# label axes and add graph title
axes.set_xlabel("Builds", {'fontsize': 14})
axes.xaxis.set_label_coords(1.1, -0.05)
axes.set_ylabel("Duration [s]", {'fontsize': 14})
axes.set_title("Build stages trend", {'fontsize': 22})
# display legend
legend_proxies = []
for plot in plots:
legend_proxies.append(
plt.Rectangle((0, 0), 1, 1, fc=plot.get_facecolor()[0]))
# add legend in reverse order, in upper left corner
axes.legend(legend_proxies[::-1], self.stages.keys()[::-1], loc=2)
# save figure
plt.savefig(trend_file)
| Python | 0.000002 | @@ -3918,17 +3918,18 @@
oords(1.
-1
+05
, -0.05)
|
735a52b8ad4ebf7b6b8bb47e14667cd9004e624b | add some mappings | algo/lru.py | algo/lru.py | class Node:
def __init__(self, val):
self.next = None
self.prev = None
self.value = val
class DoublyLinkedList:
def __init__(self):
self.head = None
def insert(self, val):
node = Node(val)
head = self.head
if self.head == None:
self.head = node
else:
while head.next != None:
head = head.next
head.next = node
node.prev = head
def print_list(self):
head = self.head
while head != None:
print head.value
head = head.next
if __name__ == '__main__':
dll = DoublyLinkedList()
for i in range(10):
dll.insert(i)
| Python | 0.000011 | @@ -1,12 +1,30 @@
+mapping = %7B%7D%0A %0A
class Node:%0A
@@ -254,16 +254,52 @@
de(val)%0A
+ mapping%5Bval%5D = node %0A
|
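
The `mapping` dict this commit introduces is one half of the classic LRU-cache recipe: a hash map for O(1) key-to-node lookup alongside the doubly linked list's O(1) reordering. A self-contained sketch of the complete combination (my own illustration, not this module's code, which has no eviction yet):

```python
class LRUCache:
    """Hash map for O(1) lookup + doubly linked list for O(1) recency updates."""

    class _Node:
        __slots__ = ('key', 'value', 'prev', 'next')

        def __init__(self, key=None, value=None):
            self.key, self.value = key, value
            self.prev = self.next = None

    def __init__(self, capacity):
        self.capacity = capacity
        self.mapping = {}                                  # key -> node, as in the diff above
        self.head, self.tail = self._Node(), self._Node()  # sentinels
        self.head.next, self.tail.prev = self.tail, self.head

    def _unlink(self, node):
        node.prev.next, node.next.prev = node.next, node.prev

    def _push_front(self, node):              # front == most recently used
        node.prev, node.next = self.head, self.head.next
        self.head.next.prev = node
        self.head.next = node

    def get(self, key):
        node = self.mapping.get(key)
        if node is None:
            return None
        self._unlink(node)
        self._push_front(node)
        return node.value

    def put(self, key, value):
        node = self.mapping.get(key)
        if node is not None:
            node.value = value
            self._unlink(node)
        else:
            if len(self.mapping) >= self.capacity:
                lru = self.tail.prev          # least recently used lives at the back
                self._unlink(lru)
                del self.mapping[lru.key]
            node = self._Node(key, value)
            self.mapping[key] = node
        self._push_front(node)
```
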
6bb58e13b657c1546f4f5d1afa70d48a9187f168 | Update server.py | gprs/server.py | gprs/server.py | from socket import *
from modules import decode_packet
import sys
from modules import params
Parser = params.Parser()
argv = Parser.createParser()
ip_and_port = argv.parse_args(sys.argv[1:])
#host = ip_and_port.ip
#port = int(ip_and_port.port)
host = "0.0.0.0"
port = 5300
addr = (host, port)
print(host,port)
tcp_socket = socket(AF_INET, SOCK_STREAM)
tcp_socket.bind(addr)
tcp_socket.listen(10)
loop = True
while loop:
data = None
print('wait connection...')
conn, addr = tcp_socket.accept()
while loop:
f = open('logs/gprs.log', 'a+')
data = conn.recv(109)
decode_packet.insert(data)
print(data)
if data:
f.write(str(data))
f.close()
else:
f.close()
break
conn.close()
tcp_socket.close()
| Python | 0.000001 | @@ -267,9 +267,9 @@
= 5
-3
+1
00%0Aa
|
9e5b42fa14b50d91840a67646ed6779d8f5c22ae | Make ``cursor_kinds`` private | bears/c_languages/ClangComplexityBear.py | bears/c_languages/ClangComplexityBear.py | from clang.cindex import Index, CursorKind
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from bears.c_languages.ClangBear import clang_available, ClangBear
class ClangComplexityBear(LocalBear):
"""
Calculates cyclomatic complexity of each function and displays it to the
user.
"""
LANGUAGES = ClangBear.LANGUAGES
REQUIREMENTS = ClangBear.REQUIREMENTS
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Complexity'}
check_prerequisites = classmethod(clang_available)
decisive_cursor_kinds = {
CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}
def function_key_points(self, cursor, top_function_level=False):
"""
Calculates number of function's decision points and exit points.
:param top_function_level: Whether cursor is in the top level of
the function.
"""
decisions, exits = 0, 0
for child in cursor.get_children():
if child.kind in self.decisive_cursor_kinds:
decisions += 1
elif child.kind == CursorKind.RETURN_STMT:
exits += 1
if top_function_level:
# There is no point to move forward, so just return.
return decisions, exits
child_decisions, child_exits = self.function_key_points(child)
decisions += child_decisions
exits += child_exits
if top_function_level:
# Implicit return statement.
exits += 1
return decisions, exits
def complexities(self, cursor, filename):
"""
Calculates cyclomatic complexities of functions.
"""
file = cursor.location.file
if file is not None and file.name != filename:
# There is nothing to do in another file.
return
if cursor.kind == CursorKind.FUNCTION_DECL:
child = next((child for child in cursor.get_children()
if child.kind != CursorKind.PARM_DECL),
None)
if child:
decisions, exits = self.function_key_points(child, True)
complexity = max(1, decisions - exits + 2)
yield cursor, complexity
else:
for child in cursor.get_children():
yield from self.complexities(child, filename)
def run(self, filename, file, max_complexity: int=8):
"""
Check for all functions if they are too complicated using the cyclomatic
complexity metric.
You can read more about this metric at
<https://www.wikiwand.com/en/Cyclomatic_complexity>.
:param max_complexity: Maximum cyclomatic complexity that is
considered to be normal. The value of 10 had
received substantial corroborating evidence.
But the general recommendation: "For each
module, either limit cyclomatic complexity to
[the agreed-upon limit] or provide a written
explanation of why the limit was exceeded."
"""
root = Index.create().parse(filename).cursor
for cursor, complexity in self.complexities(root, filename):
if complexity > max_complexity:
affected_code = (SourceRange.from_clang_range(cursor.extent),)
yield Result(
self,
"The function '{function}' should be simplified. Its "
"cyclomatic complexity is {complexity} which exceeds "
"maximal recommended value "
"of {rec_value}.".format(
function=cursor.displayname,
complexity=complexity,
rec_value=max_complexity),
affected_code=affected_code,
additional_info=(
"The cyclomatic complexity is a metric that measures "
"how complicated a function is by counting branches "
"and exits of each function.\n\n"
"Your function seems to be complicated and should be "
"refactored so that it can be understood by other "
"people easily.\n\nSee "
"<http://www.wikiwand.com/en/Cyclomatic_complexity>"
" for more information."))
| Python | 0 | @@ -669,24 +669,25 @@
ilable)%0A
+_
decisive_cur
@@ -1228,16 +1228,17 @@
in self.
+_
decisive
|
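
The score this bear reports is `max(1, decisions - exits + 2)`, computed in `complexities()` above from the counts gathered by `function_key_points()`; a few illustrative evaluations of that expression:

```python
def complexity(decisions: int, exits: int) -> int:
    # Mirrors max(1, decisions - exits + 2) from ClangComplexityBear.complexities().
    return max(1, decisions - exits + 2)

assert complexity(0, 1) == 1   # straight-line function with a single return
assert complexity(3, 1) == 4   # three branch points, one exit
assert complexity(1, 2) == 1   # an extra early return offsets a branch
```
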
22952f57c33070f83c4e9c38b2a96543ed983f4e | Make ndb_persistence execute Context's complete event | furious/extras/appengine/ndb_persistence.py | furious/extras/appengine/ndb_persistence.py | #
# Copyright 2014 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the default functions to use when performing
persistence operations backed by the App Engine ndb library.
"""
import logging
from google.appengine.ext import ndb
class FuriousContextNotFoundError(Exception):
"""FuriousContext entity not found in the datastore."""
class FuriousContext(ndb.Model):
context = ndb.JsonProperty(indexed=False, compressed=True)
@classmethod
def from_context(cls, context):
"""Create a `cls` entity from a context."""
return cls(id=context.id, context=context.to_dict())
@classmethod
def from_id(cls, id):
"""Load a `cls` entity and instantiate the Context it stores."""
from furious.context import Context
# TODO: Handle exceptions and retries here.
entity = cls.get_by_id(id)
if not entity:
raise FuriousContextNotFoundError(
"Context entity not found for: {}".format(id))
return Context.from_dict(entity.context)
class FuriousAsyncMarker(ndb.Model):
"""This entity serves as a 'complete' marker."""
pass
def context_completion_checker(async):
"""Check if all Async jobs within a Context have been run."""
context_id = async.context_id
logging.debug("Check completion for: %s", context_id)
context = FuriousContext.from_id(context_id)
logging.debug("Loaded context.")
task_ids = context.task_ids
logging.debug(task_ids)
offset = 10
for index in xrange(0, len(task_ids), offset):
keys = [ndb.Key(FuriousAsyncMarker, id)
for id in task_ids[index:index + offset]]
markers = ndb.get_multi(keys)
if not all(markers):
logging.debug("Not all Async's complete")
return False
logging.debug("All Async's complete!!")
return True
def store_context(context):
"""Persist a Context object to the datastore."""
logging.debug("Attempting to store Context %s.", context.id)
entity = FuriousContext.from_context(context)
# TODO: Handle exceptions and retries here.
key = entity.put()
logging.debug("Stored Context with key: %s.", key)
def store_async_result(async):
"""Persist the Async's result to the datastore."""
logging.debug("Storing result for %s", async)
pass
def store_async_marker(async):
"""Persist a marker indicating the Async ran to the datastore."""
logging.debug("Attempting to mark Async %s complete.", async.id)
# TODO: Handle exceptions and retries here.
key = FuriousAsyncMarker(id=async.id).put()
logging.debug("Marked Async complete using marker: %s.", key)
| Python | 0.000009 | @@ -2380,16 +2380,61 @@
ete!!%22)%0A
+%0A context.exec_event_handler('complete')%0A%0A
retu
|
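
`context_completion_checker` above looks up marker entities ten ids at a time, one `ndb.get_multi` call per slice; the slicing pattern on its own, with no App Engine dependencies:

```python
task_ids = ['task-%d' % i for i in range(23)]   # dummy ids for illustration
offset = 10
for index in range(0, len(task_ids), offset):
    batch = task_ids[index:index + offset]
    print(len(batch))   # 10, 10, 3 -- each batch would feed one ndb.get_multi
```
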
d428f6df195c0293340089b884b934fa16ef7ff6 | Use local timezone if available. Fixes #3 | wanikani/cli.py | wanikani/cli.py | import argparse
import logging
import os
from wanikani.core import WaniKani, Radical, Kanji, Vocabulary
CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani')
logger = logging.getLogger(__name__)
def config():
if os.path.exists(CONFIG_PATH):
logger.debug('Loading config from %s', CONFIG_PATH)
with open(CONFIG_PATH) as f:
return f.read().strip()
return ''
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# Global Options
parser.add_argument('-a', '--api-key', default=config())
parser.add_argument('-d', '--debug',
action='store_const',
const=logging.DEBUG,
default=logging.WARNING
)
def profile(client, args):
p = client.profile()
print 'Username:', p['username']
print 'Level:', p['level']
profile.parser = subparsers.add_parser('profile')
profile.parser.set_defaults(func=profile)
def level_progress(client, args):
p = client.level_progress()
print p['user_information']['username'], 'level', p['user_information']['level']
print 'Radicals:', p['radicals_total']
print 'Kanji:', p['kanji_total']
level_progress.parser = subparsers.add_parser('progress')
level_progress.parser.set_defaults(func=level_progress)
def recent_unlocks(client, args):
p = client.recent_unlocks()
print p['user_information']['username'], 'level', p['user_information']['level']
for item in p['items']:
print item['level'], item['character']
recent_unlocks.parser = subparsers.add_parser('unlocks')
recent_unlocks.parser.set_defaults(func=recent_unlocks)
def upcoming(client, args):
queue = client.upcoming()
for ts in sorted(queue):
if len(queue[ts]):
radicals, kanji, vocab, total = 0, 0, 0, 0
for obj in queue[ts]:
total += 1
if isinstance(obj, Radical):
radicals += 1
if isinstance(obj, Kanji):
kanji += 1
if isinstance(obj, Vocabulary):
vocab += 1
# Note the trailing commas,
# We only want a newline for the last one
print ts,
print 'Total:', total,
print 'Radials:', radicals,
print 'Kanji:', kanji,
print 'Vocab:', vocab
upcoming.parser = subparsers.add_parser('upcoming')
upcoming.parser.set_defaults(func=upcoming)
def set_key(client, args):
with open(CONFIG_PATH, 'w') as f:
f.write(args.api_key)
print 'Wrote {0} to {1}'.format(args.api_key, CONFIG_PATH)
set_key.parser = subparsers.add_parser('set_key')
set_key.parser.set_defaults(func=set_key)
set_key.parser.add_argument('api_key',help="New API Key")
args = parser.parse_args()
logging.basicConfig(level=args.debug)
client = WaniKani(args.api_key)
args.func(client, args)
| Python | 0.000001 | @@ -35,16 +35,252 @@
ort os%0A%0A
+# If the tzlocal package is installed, then we will help the user out%0A# and print things out in the local timezone%0ALOCAL_TIMEZONE = None%0Atry:%0A import tzlocal%0A LOCAL_TIMEZONE = tzlocal.get_localzone()%0Aexcept ImportError:%0A pass%0A%0A
from wan
@@ -2448,16 +2448,105 @@
b += 1%0A%0A
+ if LOCAL_TIMEZONE:%0A ts.replace(tzinfo=LOCAL_TIMEZONE)%0A
|
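
Each subcommand above binds its handler through `set_defaults(func=...)`, so the final `args.func(client, args)` dispatches without any if/elif chain; the pattern reduced to a minimal standalone example:

```python
import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()

greet = subparsers.add_parser('greet')
greet.add_argument('name')
greet.set_defaults(func=lambda args: print('hello', args.name))

args = parser.parse_args(['greet', 'world'])
args.func(args)   # -> hello world
```
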
b53bee8978c6fe407fce7769e16ac4991e36fcda | Return unknown status if geolocation API is unavailable | client/plugins/geolocation.py | client/plugins/geolocation.py | #!/usr/bin/env python3
import pickle
import json
import os
import re
import requests
import subprocess
import sys
from qlmdm import top_dir, var_dir
from qlmdm.client import get_setting
cache_file = os.path.join(var_dir, 'geolocation.cache')
os.chdir(top_dir)
def unknown():
print(json.dumps('unknown'))
sys.exit()
def old_data_is_good(old_data, ip_addresses, access_points):
if 'response' not in old_data:
return False
try:
old_ip_addresses = set(old_data['ip_addresses'].values())
except:
old_ip_addresses = set()
new_ip_addresses = set(ip_addresses.values())
if old_ip_addresses != new_ip_addresses:
return False
new_mac_addresses = set(a['macAddress'] for a in access_points)
if not new_mac_addresses:
return True
try:
old_mac_addresses = set(a['macAddress']
for a in old_data['access_points'])
except:
old_mac_addresses = set()
percentage_overlap = (100 * len(new_mac_addresses & old_mac_addresses) /
len(new_mac_addresses))
if percentage_overlap > 74:
return True
return False
api_key = get_setting('geolocation_api_key')
if not api_key:
unknown()
address_re = re.compile(
r'\bAddress:\s*([0-9a-f][0-9a-f](?::[0-9a-f][0-9a-f])*)',
re.IGNORECASE)
signal_re = re.compile(r'\bSignal level=(-\d+)\d*dBm')
channel_re = re.compile(r'\bChannel:\s*(\d+)')
access_points = {}
ip_addresses = json.loads(
subprocess.check_output('client/plugins/ip_addresses.py').decode('ascii'))
try:
old_data = pickle.load(open(cache_file, 'rb'))
except:
old_data = {}
# iwlist returns slightly different results every time, so we need to run it
# several times and merge the output.
for i in range(5):
try:
output = subprocess.check_output(
('iwlist', 'scan'), stderr=subprocess.STDOUT).decode('ascii')
except:
unknown()
for cell in re.split(r'\n\s+Cell \d+ ', output):
ap = {}
match = address_re.search(cell)
if not match:
continue
ap['macAddress'] = match.group(1).lower()
match = signal_re.search(cell)
if match:
ap['signalStrength'] = match.group(1)
match = channel_re.search(cell)
if match:
ap['channel'] = match.group(1)
access_points[ap['macAddress']] = ap
# To conserve API quota, don't submit if WiFi access points match the last
# call's 75% or more and the IP addresses haven't changed.
if old_data_is_good(old_data, ip_addresses, access_points.values()):
sys.stderr.write('Using old data\n')
print(json.dumps(old_data['response']))
sys.exit()
data = {}
if access_points:
data['wifiAccessPoints'] = list(access_points.values())
url = 'https://www.googleapis.com/geolocation/v1/geolocate?key={}'.format(
api_key)
response = requests.post(url, data=json.dumps(data))
try:
response.raise_for_status()
except:
unknown()
old_data = {
'response': response.json(),
'ip_addresses': ip_addresses,
'access_points': access_points,
}
pickle.dump(old_data, open(cache_file, 'wb'))
print(json.dumps(response.json()))
| Python | 0.000002 | @@ -2918,16 +2918,25 @@
pi_key)%0A
+try:%0A
response
@@ -2978,22 +2978,28 @@
ps(data)
-)%0Atry:
+, timeout=5)
%0A res
|
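
The cache test in `old_data_is_good` reuses the previous geolocation response when more than 74% of the currently visible access-point MACs also appeared in the cached scan; the arithmetic in isolation, with made-up MAC sets:

```python
new_macs = {'aa:bb', 'cc:dd', 'ee:ff', '11:22'}
old_macs = {'aa:bb', 'cc:dd', 'ee:ff', '33:44'}

percentage_overlap = 100 * len(new_macs & old_macs) / len(new_macs)
print(percentage_overlap > 74)   # True: 75.0% overlap, so the cache is reused
```
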
c256cf15bd6996b394c36b49fc9b7566abeb55f5 | Fix struct.unpack format for Python 3 | demo/python/cmd.py | demo/python/cmd.py | #!/usr/bin/env python
## It emulates the program "cmd" which is distributed with
## the comedilib software
## Copyright (C) May 2003 Luc Lefebvre <luc.lefebvre@mcgill.ca>
## Mar 2012 W. Trevor King <wking@drexel.edu>
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
#set the paths so python can find the comedi module
import sys, os, string, struct, time
import comedi as c
#open a comedi device
dev=c.comedi_open('/dev/comedi0')
if not dev: raise Exception("Error opening Comedi device")
#get a file-descriptor for use later
fd = c.comedi_fileno(dev)
if fd<=0: raise Exception("Error obtaining Comedi device file descriptor")
BUFSZ = 10000
freq=1000 # as defined in demo/common.c
subdevice=0 #as defined in demo/common.c
nscans=8000 #specify total number of scans
#three lists containing the chans, gains and referencing
#the lists must all have the same length
chans=[0,1,2,3]
gains=[0,0,0,0]
aref =[c.AREF_GROUND, c.AREF_GROUND, c.AREF_GROUND, c.AREF_GROUND]
cmdtest_messages = [
"success",
"invalid source",
"source conflict",
"invalid argument",
"argument conflict",
"invalid chanlist"]
nchans = len(chans) #number of channels
#wrappers include a "chanlist" object (just an Unsigned Int array) for holding the chanlist information
mylist = c.chanlist(nchans) #create a chanlist of length nchans
#now pack the channel, gain and reference information into the chanlist object
#N.B. the CR_PACK and other comedi macros are now python functions
for index in range(nchans):
mylist[index]=c.cr_pack(chans[index], gains[index], aref[index])
def dump_cmd(cmd):
print("---------------------------")
print("command structure contains:")
print("cmd.subdev : ", cmd.subdev)
print("cmd.flags : ", cmd.flags)
print("cmd.start :\t", cmd.start_src, "\t", cmd.start_arg)
print("cmd.scan_beg :\t", cmd.scan_begin_src, "\t", cmd.scan_begin_arg)
print("cmd.convert :\t", cmd.convert_src, "\t", cmd.convert_arg)
print("cmd.scan_end :\t", cmd.scan_end_src, "\t", cmd.scan_end_arg)
print("cmd.stop :\t", cmd.stop_src, "\t", cmd.stop_arg)
print("cmd.chanlist : ", cmd.chanlist)
print("cmd.chanlist_len : ", cmd.chanlist_len)
print("cmd.data : ", cmd.data)
print("cmd.data_len : ", cmd.data_len)
print("---------------------------")
## ret = c.comedi_get_buffer_size(dev, subdevice)
## if ret==-1:
## raise Exception("Error fetching comedi buffer size")
## else:
## print("buffer size = ", ret)
## ret = c.comedi_get_max_buffer_size(dev, subdevice)
## if ret==-1:
## raise Exception("Error fetching comedi max buff size")
## else:
## print("max buff size = ", ret)
#construct a comedi command
cmd = c.comedi_cmd_struct()
period = int(1.0e9/freq) # in nanoseconds
ret = c.comedi_get_cmd_generic_timed(dev,subdevice,cmd,nchans,period)
if ret: raise Exception("Error comedi_get_cmd_generic failed")
cmd.chanlist = mylist # adjust for our particular context
cmd.chanlist_len = nchans
cmd.scan_end_arg = nchans
if cmd.stop_src==c.TRIG_COUNT: cmd.stop_arg=nscans
print("command before testing")
dump_cmd(cmd)
#test our comedi command a few times.
ret = c.comedi_command_test(dev,cmd)
print("first cmd test returns ", ret, cmdtest_messages[ret])
if ret<0: raise Exception("comedi_command_test failed")
dump_cmd(cmd)
ret = c.comedi_command_test(dev,cmd)
print("second test returns ", ret, cmdtest_messages[ret])
if ret<0: raise Exception("comedi_command_test failed")
if ret !=0:
dump_cmd(cmd)
raise Exception("Error preparing command")
#execute the command!
## ret = c.comedi_command(dev,cmd)
## if ret !=0: raise Exception("comedi_command failed...")
datastr = ()
t0 = time.time()
ret = c.comedi_command(dev,cmd)
if ret !=0: raise Exception("comedi_command failed...")
while (1):
#ret = c.comedi_poll(dev,subdevice)
#print("poll ret = ", ret)
data = os.read(fd,BUFSZ)
#print("len(data) = ", len(data))
if len(data)==0:
break
n = len(data)/2 # 2 bytes per 'H'
format = repr(n)+'H'
#print("format = ", format)
#bytes = struct.calcsize(format)
#print("bytes = ", bytes)
#nbytes = c.comedi_get_buffer_contents(dev,subdevice)
#print("n = ", n, " nbytes = ", nbytes)
datastr = datastr + struct.unpack(format,data)
t1 = time.time()
print("start time : ", t0)
print("end time : ", t1)
print("time : ", t1 - t0, " seconds")
count = 0
while count < len(datastr):
for i in range(4):
print("%d\t" % datastr[count+i])
print("\n")
count = count + 4
ret = c.comedi_close(dev)
if ret !=0: raise Exception("comedi_close failed...")
| Python | 0.999997 | @@ -4332,16 +4332,17 @@
n(data)/
+/
2 # 2 by
|
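
The one-character diff above matters because Python 3's `/` always yields a float, and embedding that in a `struct` format string produces something like `'4.0H'`, which `struct.unpack` rejects; floor division keeps the repeat count an integer. A minimal demonstration:

```python
import struct

data = bytes(8)                  # stand-in for four 2-byte 'H' samples
n = len(data) // 2               # floor division: n is the int 4
print(struct.unpack(str(n) + 'H', data))        # (0, 0, 0, 0)

try:
    struct.unpack(str(len(data) / 2) + 'H', data)   # format '4.0H'
except struct.error as exc:
    print('rejected:', exc)
```
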
85775847e93b35ac19e09962bc2b10f9be666e33 | Update analysis.py with new finallist.py method | analysis.py | analysis.py | import random
import linecache
from unidecode import unidecode
# ACTUALLY: pick a random line in links-sorted, and translate the numbers from there
# Get a random node, and pull that line from the links doc––want this to be an option
# Pull from links because some titles don't have link lines
lineno = random.randint(1,5706070)
linestr = linecache.getline('links-simple-sorted.txt',lineno)
# Process the string to split the "from" and "to" numbers
[origin, dest] = linestr.split(':')
dest = dest[1:-1] # Gets rid of the first space and trailing newline
dest = dest.split(' ') # Split at spaces
# Translate these into title
oname = lincache.getline('titles-sorted.txt',int(origin))
oname = oname[:-1] # Gets rid of the trailing newline
UNIoname = unidecode(u oname)
for thisnum in dest:
dname = linecache.getline('titles-sorted.txt',int(thisnum))[:-1]
UNIdname = unidecode(linecache.getline('titles-sorted.txt', int(thisnum))[:-1])
# Get some stats bro
linksout = len(dest)
# To get linksin need an adjacency matrix
def assemblematrix():
# Something with links-simple-sorted.txt
# Parse that shit in
def linksin(node):
# Locations of value "1" in the row int(node)
def linksout(node):
# Locations of value "1" in the col int(node)
| Python | 0 | @@ -57,16 +57,224 @@
decode%0A%0A
+# Process links into list%0Afinallist = %5BNone%5D * 5716809%0Awith open('links-simple-sorted.txt', 'r') as src:%0A%09for line in src:%0A%09%09%5BoNode, dNode%5D = line.split(':')%0A%09%09finallist%5Bint(oNode)%5D = dNode.rstrip('%5Cn')%5B1:%5D%0A%0A
# ACTUAL
@@ -413,10 +413,10 @@
doc
-%E2%80%93%E2%80%93
+;
want
@@ -441,74 +441,14 @@
ion%0A
-# Pull from links because some titles don't have link lines%0Alineno
+%0AoNode
= r
@@ -476,275 +476,57 @@
70)%0A
-%0Alinestr = linecache.getline('links-simple-sorted.txt',lineno)%0A%0A# Process the string to split the %22from%22 and %22to%22 numbers%0A%5Borigin, dest%5D = linestr.split(':')%0Adest = dest%5B1:-1%5D # Gets rid of the first space and trailing newline%0Adest = dest.split(' ') # Split at spaces
+dNode = finallist%5BoNode%5D%0AdNode = dNode.split(' ')
%0A%0A#
@@ -551,16 +551,38 @@
to title
+s and print the result
%0Aoname =
@@ -585,16 +585,17 @@
me = lin
+e
cache.ge
@@ -629,13 +629,12 @@
nt(o
-rigin
+Node
))%0Ao
@@ -690,38 +690,74 @@
ine%0A
-UNIoname = unidecode(u oname)%0A
+print '%5CnORIGIN NODE: ' + oname + '%5Cn'%0A%0Aprint 'DESTINATION NODES:'
%0Afor
@@ -769,19 +769,20 @@
num in d
-est
+Node
:%0A%09dname
@@ -846,392 +846,36 @@
1%5D%0A%09
-UNIdname = unidecode(linecache.getline('titles-sorted.txt', int(thisnum))%5B:-1%5D)%0A%0A# Get some stats bro%0Alinksout = len(dest)%0A# To get linksin need an adjacency matrix%0A%0Adef assemblematrix():%0A%09# Something with links-simple-sorted.txt%0A%09# Parse that shit in%0A%0Adef linksin(node):%0A%09# Locations of value %221%22 in the row int(node)%0A%0Adef linksout(node):%0A%09# Locations of value %221%22 in the col int(node)%0A%0A
+print ' ' + dname%0Aprint '%5Cn'
|
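
The rewrite in the diff above replaces per-lookup `linecache` calls with a list of 5,716,809 slots indexed directly by node id, so each origin's destinations come back in O(1); the same pattern in miniature, with a hypothetical three-node links file:

```python
lines = ['1: 2 3', '2: 3', '3: 1']           # 'origin: dest dest ...' format

finallist = [None] * (len(lines) + 1)        # slot 0 unused; ids start at 1
for line in lines:
    o_node, d_node = line.split(':')
    finallist[int(o_node)] = d_node.strip().split()

print(finallist[1])   # ['2', '3'] -- constant-time lookup by node id
```
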
6a3f0ade1d8fe16eeda6d339220b7ef877b402e5 | Add no-break options | LFI.TESTER.py | LFI.TESTER.py | '''
@KaiyiZhang Github
'''
import sys
import urllib2
import getopt
import time
target = ''
depth = 6
file = 'etc/passwd'
html = ''
prefix = ''
url = ''
keyword='root'
def usage():
print "LFI.Tester.py Help:"
print "Usage: LFI.TESTER.py -t [-d] [-f] [-k]"
print " -t,--target The test url"
print " -d,--depth The depth for test (Default is 6)"
print " -f,--file The File include (Default is etc/passwd)"
print " -k,--keyword the keyword for vuln check (Default is root)"
try:
if len(sys.argv) < 2:
usage()
sys.exit()
opts,args = getopt.getopt(sys.argv[1:],"ht:d:f:k:",["help","target=","depth=","file=","keyword="])
for opt, arg in opts:
if opt in("-h","--help"):
usage()
sys.exit()
if opt in("-t","--target"):
target = arg
if not target.startswith('http://', 0, 7):
target = 'http://' + target
if opt in("-d","--depth"):
depth = int(arg)
if depth < 1:
usage()
sys.exit()
if opt in("-f","--file"):
file = arg
if file.startswith('/',0,1):
file =file[1:]
if opt in("-k","--keyword"):
keyword = arg
#print keyword
except getopt.GetoptError:
usage()
sys.exit(2)
for i in range(0,depth):
prefix += '../'
url = target + prefix + file
print "Testing: ",url
try:
response = urllib2.urlopen(url)
#print response.info()
html = response.read()
#print html
except:
pass
if(keyword in html):
print url, " is Vulnerable"
break
else:
time.sleep(2)
continue
| Python | 0.998376 | @@ -158,15 +158,31 @@
word
-=
+ =
'root'
+%0Aforce = False
%0A%0Ade
@@ -605,16 +605,17 @@
t:d:f:k:
+n
%22,%5B%22help
@@ -653,16 +653,27 @@
eyword=%22
+,%22no-break%22
%5D)%0A%09%09for
@@ -1142,17 +1142,66 @@
keyword%0A
+%09%09%09if opt in(%22-n%22,%22--no-break%22):%0A%09%09%09%09force = True
%0A
-
except g
@@ -1479,17 +1479,17 @@
ass%0A%09%09if
-(
+
keyword
@@ -1495,17 +1495,16 @@
in html
-)
:%0A%09%09%09%09pr
@@ -1533,21 +1533,64 @@
le%22%0A%09%09%09%09
-break
+if not force:%0A%09%09%09%09%09break%0A%09%09%09%09else:%0A%09%09%09%09%09continue
%0A%09%09else:
|
68c0c054e5b9874f8a6423c35fb83c9de351b9e0 | fix doc build | examples/plot_benktander.py | examples/plot_benktander.py | """
====================================================================
Benktander: Relationship between Chainladder and BornhuetterFerguson
====================================================================
This example demonstrates the relationship between the Chainladder and
BornhuetterFerguson methods by way of the Benktander model. Each is a
special case of the Benktander model where ``n_iters = 1`` for BornhuetterFerguson
and as ``n_iters`` approaches infinity yields the chainladder. As ``n_iters``
increases the apriori selection becomes less relevant regardless of initial
choice.
"""
import chainladder as cl
# Load Data
clrd = cl.load_sample('clrd')
medmal_paid = clrd.groupby('LOB').sum().loc['medmal', 'CumPaidLoss']
medmal_prem = clrd.groupby('LOB').sum().loc['medmal', 'EarnedPremDIR'].latest_diagonal
medmal_prem.rename('development', ['premium'])
# Generate LDFs and Tail Factor
medmal_paid = cl.Development().fit_transform(medmal_paid)
medmal_paid = cl.TailCurve().fit_transform(medmal_paid)
# Benktander Model
benk = cl.Benktander()
# Prep Benktander Grid Search with various assumptions, and a scoring function
param_grid = dict(n_iters=list(range(1,100,2)),
apriori=[0.50, 0.75, 1.00])
scoring = {'IBNR':lambda x: x.ibnr_.sum()}
grid = cl.GridSearch(benk, param_grid, scoring=scoring)
# Perform Grid Search
grid.fit(medmal_paid, sample_weight=medmal_prem)
# Plot data
grid.results_.pivot(index='n_iters', columns='apriori', values='IBNR').plot(
title='Benktander convergence to Chainladder', grid=True).set(ylabel='IBNR')
| Python | 0 | @@ -823,55 +823,8 @@
onal
-%0Amedmal_prem.rename('development', %5B'premium'%5D)
%0A%0A#
|
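
The docstring's convergence claim can be checked with bare arithmetic: writing L for losses to date, CDF for the development factor, and U for the current ultimate estimate, one Benktander pass is U <- L + (1 - 1/CDF) * U; a single pass from the apriori is Bornhuetter-Ferguson, and the fixed point is the chainladder ultimate L * CDF. A numeric sketch with made-up values, not the clrd data:

```python
L, cdf, u = 100.0, 2.0, 500.0        # paid to date, development factor, apriori
for k in range(60):
    u = L + (1 - 1 / cdf) * u        # one Benktander iteration
    if k == 0:
        print('Bornhuetter-Ferguson ultimate:', u)   # 350.0
print('limit after many iterations:', u)  # approaches 200.0 = L * cdf (chainladder)
```
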
15307ebe2c19c1a3983b0894152ba81fdde34619 | Add comment on dist of first function | exp/descriptivestats.py | exp/descriptivestats.py | import pandas
import numpy
import matplotlib.pyplot as plt
def univariate_stats():
num_examples = 1000
z = pandas.Series(numpy.random.randn(num_examples))
# Minimum
print(z.min())
# Maximum
print(z.max())
# Mean
print(z.mean())
# Median
print(z.median())
# Variance
print(z.var())
# Standard deviation
print(z.std())
# Mean absolute deviation
print(z.mad())
# Interquartile range
print(z.quantile(0.75) - z.quantile(0.25))
z.plot(kind="hist")
def multivariate_stats():
num_examples = 1000
x = pandas.Series(numpy.random.randn(num_examples))
y = x + pandas.Series(numpy.random.randn(num_examples))
z = x + pandas.Series(numpy.random.randn(num_examples))
# Covariance
print(y.cov(z))
# Covariance of y with itself is equal to variance
print(y.cov(y), y.var())
# Correlation
print(y.corr(z))
univariate_stats()
multivariate_stats()
plt.show()
| Python | 0 | @@ -70,32 +70,94 @@
ariate_stats():%0A
+ # Generate 1000 random numbers from a normal distribution%0A
num_examples
|
7ff6a0dc3a4f6f1ed47f999340f25fe3d5546bd4 | fix command order in shell help test | tests/ps_schedstatistics/tests/01-run.py | tests/ps_schedstatistics/tests/01-run.py | #!/usr/bin/env python3
# Copyright (C) 2017 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
PS_EXPECTED = (
(r'\tpid | name | state Q | pri | stack \( used\) | '
r'base addr | current | runtime | switches'),
(r'\t - | isr_stack | - - | - | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+'),
(r'\t 1 | idle | pending Q | 15 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 2 | main | running Q | 7 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 3 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 4 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 5 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 6 | thread | bl mutex _ | 6 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t 7 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
(r'\t | SUM | | | \d+ \(\d+\)')
)
def _check_startup(child):
for i in range(5):
child.expect_exact('Creating thread #{}, next={}'
.format(i, (i + 1) % 5))
def _check_help(child):
child.sendline('')
child.expect_exact('>')
child.sendline('help')
child.expect_exact('Command Description')
child.expect_exact('---------------------------------------')
child.expect_exact('reboot Reboot the node')
child.expect_exact('ps Prints information about '
'running threads.')
def _check_ps(child):
child.sendline('ps')
for line in PS_EXPECTED:
child.expect(line)
# Wait for all lines of the ps output to be displayed
child.expect_exact('>')
def testfunc(child):
_check_startup(child)
_check_help(child)
_check_ps(child)
if __name__ == "__main__":
sys.exit(run(testfunc))
| Python | 0.000001 | @@ -1850,71 +1850,8 @@
-')%0A
- child.expect_exact('reboot Reboot the node')%0A
@@ -1961,16 +1961,79 @@
eads.')%0A
+ child.expect_exact('reboot Reboot the node')%0A
%0A%0Adef _c
|
323dbf3bfac8cbfefb90d0b94be0eef245d38f4b | make test short | malss/test.py | malss/test.py | # -*- coding: utf-8 -*-
from sklearn.datasets.samples_generator import make_classification,\
make_regression
from malss import MALSS
import pandas as pd
from nose.plugins.attrib import attr
import numpy as np
def test_classification_2classes_small():
X, y = make_classification(n_samples=1000,
n_features=10,
n_classes=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
weights=[0.7, 0.3],
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_classification_2classes_small')
assert len(cls.algorithms) == 5
assert cls.algorithms[0].best_score is not None
def test_classification_multiclass_small():
X, y = make_classification(n_samples=1000,
n_features=20,
n_classes=3,
n_informative=10,
weights=[0.6, 0.2, 0.2],
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_classification_multiclass_small')
assert len(cls.algorithms) == 5
assert cls.algorithms[0].best_score is not None
@attr(speed='slow')
def test_classification_2classes_medium():
X, y = make_classification(n_samples=100000,
n_features=10,
n_classes=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
weights=[0.7, 0.3],
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_classification_2classes_medium')
assert len(cls.algorithms) == 4
assert cls.algorithms[0].best_score is not None
@attr(speed='slow')
def test_classification_2classes_big():
X, y = make_classification(n_samples=200000,
n_features=20,
n_classes=2,
n_informative=3,
weights=[0.7, 0.3],
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_classification_2classes_big')
assert len(cls.algorithms) == 1
assert cls.algorithms[0].best_score is not None
def test_regression_small():
X, y = make_regression(n_samples=2000,
n_features=10,
n_informative=5,
noise=30.0,
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'regression', n_jobs=3)
cls.execute()
# cls.make_report('test_regression_small')
assert len(cls.algorithms) == 2
assert cls.algorithms[0].best_score is not None
@attr(speed='slow')
def test_regression_medium():
X, y = make_regression(n_samples=20000,
n_features=10,
n_informative=5,
noise=30.0,
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'regression', n_jobs=3)
cls.execute()
# cls.make_report('test_regression_medium')
assert len(cls.algorithms) == 1
assert cls.algorithms[0].best_score is not None
@attr(speed='slow')
def test_regression_big():
X, y = make_regression(n_samples=200000,
n_features=10,
n_informative=5,
noise=30.0,
random_state=0)
X = pd.DataFrame(X)
y = pd.Series(y)
cls = MALSS(X, y, 'regression', n_jobs=3)
cls.execute()
# cls.make_report('test_regression_big')
assert len(cls.algorithms) == 1
assert cls.algorithms[0].best_score is not None
def test_classification_categorical():
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Heart.csv',
index_col=0, na_values=[''])
y = data['AHD']
del data['AHD']
cls = MALSS(data, y, 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_classification_categorical')
assert len(cls.algorithms) == 5
assert cls.algorithms[0].best_score is not None
def test_ndarray():
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Heart.csv',
index_col=0, na_values=[''])
y = data['AHD']
del data['AHD']
cls = MALSS(np.array(data), np.array(y), 'classification', n_jobs=3)
cls.execute()
# cls.make_report('test_ndarray')
assert len(cls.algorithms) == 5
assert cls.algorithms[0].best_score is not None
if __name__ == "__main__":
test_classification_2classes_small()
| Python | 0.000443 | @@ -2017,32 +2017,34 @@
', n_jobs=3)%0A
+ #
cls.execute()%0A
@@ -2139,24 +2139,26 @@
ms) == 4%0A
+ #
assert cls.
|
f2805104cd079a937c6efe03414fe9d0dc4ab3d1 | Work around Galaxy inability to handle optional select parameter with dynamic option | __init__.py | __init__.py | #!/usr/bin/env python
###
# Part of the Adaptive Divergence through Direction of Selection workflow.
# Copyright (C) 2011 Tim te Beek <tim.te.beek@nbic.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###
"""Package divergence"""
from pkg_resources import resource_filename #@UnresolvedImport #pylint: disable=E0611
from zipfile import ZipFile, ZIP_DEFLATED, is_zipfile
import Bio
import getopt
import httplib2
import logging
import os
import shutil
import sys
#Configure logging LOG_FORMAT
LOG_FORMAT = '%(levelname)s\t%(asctime)s %(module)s.%(funcName)s:%(lineno)d\t%(message)s'
LOG_DATE_FORMAT = '%H:%M:%S'
#Logs WARNING messages and anything above to sys.stdout
logging.basicConfig(level = logging.INFO, stream = sys.stdout, format = LOG_FORMAT, datefmt = LOG_DATE_FORMAT)
#Log ERROR messages to stderr separately; these will fail a tool run in Galaxy
STDERR_HANDLER = logging.StreamHandler(sys.stderr)
STDERR_HANDLER.setLevel(logging.ERROR)
STDERR_HANDLER.setFormatter(logging.Formatter(fmt = LOG_FORMAT, datefmt = LOG_DATE_FORMAT))
logging.root.addHandler(STDERR_HANDLER)
#Require at least version 1.54 of BioPython
assert 1.54 <= float(Bio.__version__), 'BioPython version 1.54 or higher is required'
#Using the standard NCBI Bacterial, Archaeal and Plant Plastid Code translation table (11)
CODON_TABLE_ID = 11
#Base output dir
BASE_OUTPUT_PATH = '../divergence-cache/'
def create_directory(dirname, inside_dir = BASE_OUTPUT_PATH):
"""Create a directory in the default output directory, and return the full path to the directory.
Return directory if directory already exists, raise error if file by that name already exists."""
filename = os.path.join(inside_dir, dirname)
#For non-absolute paths, get filename relative to this module
if filename[0] != '/':
filename = resource_filename(__name__, filename)
#If file exists and is a directory, return the existing directory unaltered
if os.path.exists(filename):
if os.path.isdir(filename):
return filename
else:
            raise IOError('Could not create directory {0}\nA file with that name already exists.'.format(filename))
else:
os.makedirs(filename)
return filename
#Initialize shared cache for files downloaded through httplib2
HTTP_CACHE = httplib2.Http(create_directory('.cache'))
def concatenate(target_path, source_files):
"""Concatenate arbitrary number of files into target_path by reading and writing in binary mode.
WARNING: The binary mode implies new \n characters will NOT be added in between files!"""
with open(target_path, mode = 'wb') as write_handle:
for source_file in source_files:
shutil.copyfileobj(open(source_file, mode = 'rb'), write_handle)
assert os.path.isfile(target_path) and 0 < os.path.getsize(target_path), target_path + ' should exist with content'
def create_archive_of_files(archive_file, file_iterable):
"""Write files in file_iterable to archive_file, using only filename for target path within archive_file."""
zipfile_handle = ZipFile(archive_file, mode = 'w', compression = ZIP_DEFLATED)
if len(file_iterable):
for some_file in file_iterable:
zipfile_handle.write(some_file, os.path.split(some_file)[1])
else:
logging.warn('No files in file_iterable: %s will be empty!', archive_file)
zipfile_handle.writestr('empty', '')
zipfile_handle.close()
assert is_zipfile(archive_file), 'File should now have been a valid zipfile: ' + archive_file
def extract_archive_of_files(archive_file, target_dir):
"""Extract all files from archive to target directory, and return list of files extracted."""
extracted_files = []
read_handle = ZipFile(archive_file, mode = 'r')
for zipinfo in read_handle.infolist():
extracted_path = read_handle.extract(zipinfo, path = target_dir)
extracted_files.append(extracted_path)
read_handle.close()
assert extracted_files, 'At least some files should have been read from ' + archive_file
return extracted_files
def parse_options(usage, options, args):
"""Parse command line arguments in args. Options require argument by default; flags are indicated with '?' postfix.
Parameters:
usage -- Usage string detailing command line arguments
options -- List of command line arguments to parse
args -- Command line arguments supplied
"""
#Extract flags from options
flags = [opt[:-1] for opt in options if opt[-1] == '?']
try:
#Add postfix '=' for options that require an argument & add flags without postfix
long_options = [opt + '=' for opt in options if opt[-1] != '?']
long_options += flags
#Call getopt with long arguments only
tuples, remainder = getopt.getopt(args, '', long_options)
#If there's a remainder, not all arguments were recognized
if remainder:
raise getopt.GetoptError('Unrecognized argument(s) passed: ' + str(remainder), remainder)
arguments = dict((opt[2:], value) for opt, value in tuples)
except getopt.GetoptError as err:
#Print error & usage information to stderr
print >> sys.stderr, str(err)
print >> sys.stderr, usage
sys.exit(1)
#Remove postfixes '=?' and '?' from options, and '=' postfix from flags
options = [opt[:-2] if opt[-2:] == '=?' else opt[:-1] if opt[-1] == '?' else opt for opt in options]
flags = [flag[:-1] if flag[-1] == '=' else flag for flag in flags]
#Correctly set True/False values for flags, regardless of whether flag was already passed as argument or not
for flag in flags:
if flag in arguments:
#Only overwrite with True if value is empty, as optional arguments (flags) can have values as well
if not arguments[flag]:
arguments[flag] = True
else:
arguments[flag] = False
#Ensure all arguments were provided
for opt in options:
if opt not in arguments:
print >> sys.stderr, 'Mandatory argument {0} not provided'.format(opt)
print >> sys.stderr, usage
sys.exit(1)
#Retrieve & return file paths from dictionary in order of options
return [arguments[option] for option in options]
| Python | 0 | @@ -5498,16 +5498,50 @@
ainder:%0A
+ print '%5Cn'.join(args)%0A
|
9af7c8bfc22a250ce848d50ca26877e177f767c1 | Fix execution on Monday | management.py | management.py | from logging import _nameToLevel as nameToLevel
from argparse import ArgumentParser
from Common.emailer import Emailer
from DesksReminder.reminders import HelpDeskTechReminder, HelpDeskLabReminder, HelpDeskOtherReminder, \
UrgentDeskReminder, AccountsDeskReminder
from HelpDesk.synchronization import AskbotSync, HelpDeskCaretaker
from HelpDesk.stackoverflowsync import StackOverflowSync
from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
from datetime import datetime
__author__ = 'Fernando López'
__version__ = "1.3.0"
def init():
parser = ArgumentParser(prog='Jira Management Scripts', description='')
parser.add_argument('-l',
'--log',
default='INFO',
help='The logging level to be used.')
args = parser.parse_args()
loglevel = None
try:
loglevel = nameToLevel[args.log.upper()]
except Exception as e:
print('Invalid log level: {}'.format(args.log))
print('Please use one of the following values:')
print(' * CRITICAL')
print(' * ERROR')
print(' * WARNING')
print(' * INFO')
print(' * DEBUG')
print(' * NOTSET')
exit()
return loglevel
if __name__ == "__main__":
loglevel = init()
mailer = Emailer(loglevel=loglevel)
disable_warnings(InsecureRequestWarning)
today = datetime.today().weekday()
if today == 2:
        # Send reminder of pending JIRA tickets, only on Mondays
techReminder = HelpDeskTechReminder(loglevel=loglevel, mailer=mailer)
techReminder.process()
labReminder = HelpDeskLabReminder(loglevel=loglevel, mailer=mailer)
labReminder.process()
otherReminder = HelpDeskOtherReminder(loglevel=loglevel, mailer=mailer)
otherReminder.process()
urgentReminder = UrgentDeskReminder(loglevel=loglevel, mailer=mailer)
urgentReminder.process()
accountReminder = AccountsDeskReminder(loglevel=loglevel, mailer=mailer)
accountReminder.process()
# Askbot synchronization and Jira caretaker actions, every day
askbotSync = AskbotSync(loglevel=loglevel)
askbotSync.process()
# Automatic reassign tickets to owners based on some extracted information, every day
helpdeskCaretaker = HelpDeskCaretaker(loglevel=loglevel)
helpdeskCaretaker.process()
# StackoverFlow synchronization, every day
stackoverflowSync = StackOverflowSync(loglevel=loglevel)
stackoverflowSync.process(year=2015, month=9, day=21)
| Python | 0.000047 | @@ -1501,17 +1501,17 @@
oday ==
-2
+0
:%0A
|
ecd2821a99dee895f3ab7c5dbcc6d86983268560 | Update src url for dev in views | __init__.py | __init__.py | from flask import Flask, request, redirect, url_for
from twilio.rest import TwilioRestClient
from PIL import Image, ImageDraw, ImageFont
import time
app = Flask(__name__, static_folder='static', static_url_path='')
client = TwilioRestClient(
account='ACb01b4d6edfb1b41a8b80f5fed2c19d1a',
token='97e6b9c0074b2761eff1375fb088adda'
)
@app.route('/', methods=['GET', 'POST'])
def send_image():
if request.method == 'GET':
return 'The deployment worked! Now copy your browser URL into the' + \
' Twilio message text box for your phone number.'
sender_number = request.form.get('From', '')
twilio_number = request.form.get('To', '')
user_text = request.form.get('Body', '')
image_url, msg_text = mod_photo(user_text)
send_mms_twiml(image_url, msg_text, sender_number, twilio_number)
return 'ok'
def mod_photo(user_text):
base = Image.open('static/images/original/portland.jpg').convert('RGBA')
txt = Image.new('RGBA', base.size, (255, 255, 255, 0))
fnt = ImageFont.truetype('static/fonts/Gobold.ttf', 30)
d = ImageDraw.Draw(txt)
d.text(
(25, 25),
'{}...'.format(user_text),
font=fnt,
fill=(255, 255, 255, 255)
)
image = Image.alpha_composite(base, txt)
image.save('static/images/changed/portland_{}.jpg'.format(user_text))
try:
msg_text = '{}: Imagine yourself in Portland!'.format(user_text)
image_url = 'http://12dcb913.ngrok.com/images/changed/portland_{}.jpg'.format(user_text)
except:
msg = "Sorry, we couldn't pull a kitten, " + \
"here's a dinosaur instead!"
image_url = "https://farm1.staticflickr.com/46/" + \
"154877897_a299d80baa_b_d.jpg"
return image_url, msg_text
def send_mms_twiml(image_url, msg_text, sender_number, twilio_number):
client.messages.create(
to=sender_number,
from_=twilio_number,
body=msg_text,
media_url=image_url
)
if __name__ == "__main__":
app.run(debug=True)
| Python | 0 | @@ -1461,22 +1461,23 @@
p://
-12dcb913.ngrok
+dev.thevariable
.com
|
be0bb9e8dc4deeff2771a2583647cc4125ceb506 | Fix incorrect merge. | __init__.py | __init__.py | # Copyright (C) 2005-2007 Jelmer Vernooij <jelmer@samba.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Support for foreign branches (Subversion)
"""
import os
import sys
import unittest
import bzrlib
try:
from bzrlib.trace import warning
except ImportError:
# get the message out any way we can
from warnings import warn as warning
__version__ = '0.4.0'
compatible_bzr_versions = [(0,15),(0,16)]
def check_bzrlib_version(desired):
"""Check that bzrlib is compatible.
If version is < all compatible version, assume incompatible.
If version is compatible version + 1, assume compatible, with deprecations
Otherwise, assume incompatible.
"""
bzrlib_version = bzrlib.version_info[:2]
if bzrlib_version in desired:
return
try:
from bzrlib.trace import warning
except ImportError:
# get the message out any way we can
from warnings import warn as warning
if bzrlib_version < desired[0]:
warning('Installed bzr version %s is too old to be used with bzr-svn'
' %s.' % (bzrlib.__version__, __version__))
# Not using BzrNewError, because it may not exist.
raise Exception, ('Version mismatch', desired)
else:
warning('bzr-svn is not up to date with installed bzr version %s.'
' \nThere should be a newer version of bzr-svn available.'
% (bzrlib.__version__))
if not (bzrlib_version[0], bzrlib_version[1]-1) in desired:
raise Exception, 'Version mismatch'
def check_subversion_version():
"""Check that Subversion is compatible.
"""
try:
from svn.delta import svn_delta_invoke_txdelta_window_handler
except:
warning('Installed Subversion version does not have updated Python bindings. See the bzr-svn README for details.')
raise bzrlib.errors.BzrError("incompatible python subversion bindings")
def check_pysqlite_version():
"""Check that sqlite library is compatible.
"""
try:
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
except:
warning('Needs at least Python2.5 or Python2.4 with the pysqlite2 module')
raise bzrlib.errors.BzrError("missing sqlite library")
if (sqlite3.sqlite_version_info[0] < 3 or
(sqlite3.sqlite_version_info[0] == 3 and
sqlite3.sqlite_version_info[1] < 3)):
warning('Needs at least sqlite 3.3.x')
raise bzrlib.errors.BzrError("incompatible sqlite library")
check_bzrlib_version(required_bzr_version)
check_bzrlib_version(compatible_bzr_versions)
check_subversion_version()
check_pysqlite_version()
import branch
import convert
import format
import transport
import checkout
from bzrlib.transport import register_transport
register_transport('svn://', transport.SvnRaTransport)
register_transport('svn+', transport.SvnRaTransport)
from bzrlib.bzrdir import BzrDirFormat
from bzrlib.repository import InterRepository
from fetch import InterSvnRepository
BzrDirFormat.register_control_format(format.SvnFormat)
import svn.core
subr_version = svn.core.svn_subr_version()
BzrDirFormat.register_control_format(checkout.SvnWorkingTreeDirFormat)
InterRepository.register_optimiser(InterSvnRepository)
from bzrlib.branch import Branch
from bzrlib.commands import Command, register_command, display_command, Option
from bzrlib.errors import BzrCommandError
from bzrlib.repository import Repository
import bzrlib.urlutils as urlutils
def get_scheme(schemename):
"""Parse scheme identifier and return a branching scheme."""
from scheme import BranchingScheme
ret = BranchingScheme.find_scheme(schemename)
if ret is None:
raise BzrCommandError('No such branching scheme %r' % schemename)
return ret
class cmd_svn_import(Command):
"""Convert a Subversion repository to a Bazaar repository.
"""
takes_args = ['from_location', 'to_location?']
takes_options = [Option('trees', help='Create working trees'),
Option('shared', help='Create shared repository'),
Option('all', help='Convert all revisions, even those not in current branch history (implies --shared)'),
Option('scheme', type=get_scheme,
help='Branching scheme (none, trunk, or trunk-INT)')]
@display_command
def run(self, from_location, to_location=None, trees=False,
shared=False, scheme=None, all=False):
from convert import convert_repository
from scheme import TrunkBranchingScheme
if scheme is None:
scheme = TrunkBranchingScheme()
if to_location is None:
to_location = os.path.basename(from_location.rstrip("/\\"))
if all:
shared = True
convert_repository(from_location, to_location, scheme, shared, trees,
all)
register_command(cmd_svn_import)
class cmd_svn_upgrade(Command):
"""Upgrade the revisions mapped from Subversion in a Bazaar branch.
This will change the revision ids of revisions whose parents
were mapped from svn revisions.
"""
takes_args = ['svn_repository?']
takes_options = [Option('allow-changes', help='Allow content changes')]
@display_command
def run(self, svn_repository=None, allow_changes=False):
from upgrade import upgrade_branch
branch_to = Branch.open(".")
stored_loc = branch_to.get_parent()
if svn_repository is None:
if stored_loc is None:
raise BzrCommandError("No pull location known or"
" specified.")
else:
display_url = urlutils.unescape_for_display(stored_loc,
self.outf.encoding)
self.outf.write("Using saved location: %s\n" % display_url)
svn_repository = stored_loc
upgrade_branch(branch_to, Repository.open(svn_repository),
allow_changes)
register_command(cmd_svn_upgrade)
def test_suite():
from unittest import TestSuite, TestLoader
import tests
suite = TestSuite()
suite.addTest(tests.test_suite())
return suite
if __name__ == '__main__':
print ("This is a Bazaar plugin. Copy this directory to ~/.bazaar/plugins "
"to use it.\n")
runner = unittest.TextTestRunner()
runner.run(test_suite())
else:
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
| Python | 0.000001 | @@ -3221,51 +3221,8 @@
%22)%0A%0A
-check_bzrlib_version(required_bzr_version)%0A
chec
|
49ba0e32d8d27b997f80d5420ff6b05ae1cc4461 | move stuff around in init | __init__.py | __init__.py | # -*- coding: utf-8 -*-
from flask import (Flask,
request,
render_template,
redirect,
url_for,
jsonify)
import json
from ming import (create_datastore,
Session,
collection,
Field,
Document,
schema)
from bson.objectid import ObjectId
app = Flask(__name__)
app.config['STATIC_FOLDER'] = 'static'
from models import (BookModel,
InproceedingsModel,
ArticleModel,
Book,
Inproceedings,
Article)
from database_operations import (Enc,
delete_book,
delete_inproceedings,
delete_article,
edit_book,
edit_inproceedings,
edit_article,
add_book,
add_inproceedings,
add_article,
get_book,
get_inproceedings,
get_article,
get_index_content,
list_books,
list_inproceedings,
list_articles)
@app.route('/')
def main():
return render_template('index.html',
content = get_index_content(True))
@app.route('/bibtex')
def get_all_bibtex():
all_items = get_index_content(False)
s = ''
for b in all_items['books']:
s += (b.bibtex + '<br>')
for a in all_items['articles']:
s += a.bibtex + '<br>'
for i in all_items['inproceedings']:
s += i.bibtex + '<br>'
return s
@app.route('/show_single_bibtex/<db_type>/<db_id>')
def show_single_bibtex(db_type, db_id):
if db_type == 'book':
return get_book(db_id).bibtex
elif db_type == 'article':
return get_article(db_id).bibtex
elif db_type == 'inproceedings':
return get_inproceedings(db_id).bibtex
else:
return 'invalid'
@app.route('/add_book', methods=['GET', 'POST'])
def book_adding():
if request.method == 'GET':
return render_template('add_book.html')
else:
r = request.form
add_book(r['title'],
r['author'],
r['pages'],
r['year'],
r['publisher'])
return redirect('/')
@app.route('/add_inproceedings', methods=['GET', 'POST'])
def inproceedings_adding():
if request.method == 'GET':
return render_template('add_inproceedings.html')
else:
r = request.form
add_inproceedings(r['author'],
r['title'],
r['school'],
r['year'])
return redirect('/')
@app.route('/add_article', methods=['GET', 'POST'])
def article_adding():
if request.method == 'GET':
return render_template('add_article.html')
else:
r = request.form
add_article(r['author'],
r['title'],
r['journal'],
r['year'],
r['volume'])
return redirect('/')
@app.route('/delete_book/<b_id>')
def book_deleting(b_id):
delete_book(b_id)
return redirect('/')
@app.route('/delete_inproceedings/<i_id>')
def inproceedings_deleting(i_id):
delete_inproceedings(i_id)
return redirect('/')
@app.route('/delete_article/<a_id>')
def article_deleting(a_id):
delete_article(a_id)
return redirect('/')
@app.route('/edit_book/<b_id>', methods=['GET', 'POST'])
def book_editing(b_id):
if request.method == 'GET':
b = get_book(b_id)
return render_template('edit_book.html', book = Enc().encode(b))
else:
r = request.form
edit_book(r['title'],
r['author'],
r['pages'],
r['year'],
r['publisher'],
r['db_id'])
return redirect('/')
@app.route('/edit_inproceedings/<i_id>', methods=['GET', 'POST'])
def inproceedings_editing(b_id):
if request.method == 'GET':
i = get_inproceedings(i_id)
return render_template('edit_inproceedings.html', inproceedings = Enc().encode(i))
else:
r = request.form
edit_inproceedings(r['author'],
r['title'],
r['school'],
r['year'],
r['db_id'])
return redirect('/')
@app.route('/edit_article/<a_id>', methods=['GET', 'POST'])
def article_editing(a_id):
if request.method == 'GET':
a = get_article(a_id)
return render_template('edit_article.html', article = Enc().encode(a))
else:
r = request.form
edit_article(r['author'],
r['title'],
r['journal'],
r['year'],
r['volume'],
r['db_id'])
return redirect('/')
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| Python | 0.000001 | @@ -314,70 +314,8 @@
Id%0A%0A
-app = Flask(__name__)%0Aapp.config%5B'STATIC_FOLDER'%5D = 'static'%0A%0A
from
@@ -859,16 +859,78 @@
icles)%0A%0A
+app = Flask(__name__)%0Aapp.config%5B'STATIC_FOLDER'%5D = 'static'%0A%0A
@app.rou
@@ -1182,17 +1182,16 @@
s +=
-(
b.bibtex
@@ -1199,17 +1199,16 @@
+ '%3Cbr%3E'
-)
%0A for
@@ -3295,24 +3295,25 @@
_book(b_id)%0A
+%0A
retu
@@ -3777,24 +3777,25 @@
dings(i_id)%0A
+%0A
retu
@@ -4237,24 +4237,25 @@
ticle(a_id)%0A
+%0A
retu
|
598bb39414825ff8ab561babb470b85f06c58020 | Update __init__.py | __init__.py | __init__.py | from mlpack import linear_regression
from mlpack import logistic_regression
"""
MlPack
======
Provides
1. A Variety of Machine learning packages
2. Good and Easy hand written programs with good documentation
3. Linear Regression, Logistic Regression
Available subpackages
---------------------
1. Linear Regression
2. Logistic Regression
See subpackages for more details.
"""
| Python | 0 | @@ -1,27 +1,45 @@
from mlpack
+.linear_regression
import linear_r
@@ -59,16 +59,36 @@
m mlpack
+.logistic_regression
import
|
b8d0344f0ca5c906e43d4071bc27a8d2acf114d1 | bump version | webmpris/__init__.py | webmpris/__init__.py | __version__ = '1.0'
__description__ = 'REST API to control media players via MPRIS2 interfaces'
requires = [
'pympris'
]
README = """webmpris is a REST API
to control media players via MPRIS2 interfaces.
Supported interfaces:
org.mpris.MediaPlayer2 via /players/<id>/Root
org.mpris.MediaPlayer2.Player via /players/<id>/Player
org.mpris.MediaPlayer2.TrackList via /players/<id>/TrackList
org.mpris.MediaPlayer2.Playlists via /players/<id>/Playlists
"""
| Python | 0 | @@ -14,9 +14,9 @@
'1.
-0
+1
'%0A__
|
9acf7857167bb87438c7c0bebca1a7eda93ac23b | Make saml2idp compatible with Django 1.9 | saml2idp/registry.py | saml2idp/registry.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
Registers and loads Processor classes from settings.
"""
# Python imports
import logging
# Django imports
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
# Local imports
from . import exceptions
from . import saml2idp_metadata
# Setup logging
logger = logging.getLogger(__name__)
def get_processor(config):
"""
Get an instance of the processor with config.
"""
dottedpath = config['processor']
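    # Split the dotted path (e.g. "pkg.module.ClassName") into module path and class name.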
try:
dot = dottedpath.rindex('.')
except ValueError:
raise ImproperlyConfigured('%s isn\'t a processors module' % dottedpath)
sp_module, sp_classname = dottedpath[:dot], dottedpath[dot+1:]
try:
mod = import_module(sp_module)
except ImportError, e:
raise ImproperlyConfigured('Error importing processors %s: "%s"' % (sp_module, e))
try:
sp_class = getattr(mod, sp_classname)
except AttributeError:
raise ImproperlyConfigured('processors module "%s" does not define a "%s" class' % (sp_module, sp_classname))
instance = sp_class(config)
return instance
def find_processor(request):
"""
Returns the Processor instance that is willing to handle this request.
"""
for name, sp_config in saml2idp_metadata.SAML2IDP_REMOTES.items():
proc = get_processor(sp_config)
try:
if proc.can_handle(request):
return proc
except exceptions.CannotHandleAssertion as exc:
# Log these, but keep looking.
logger.debug('%s %s' % (proc, exc))
raise exceptions.CannotHandleAssertion('None of the processors in SAML2IDP_REMOTES could handle this request.')
| Python | 0 | @@ -121,75 +121,29 @@
%22%22%22%0A
-# Python imports%0Aimport logging%0A# Django imports%0Afrom django.utils.
+import logging%0A%0Afrom
impo
@@ -169,16 +169,17 @@
_module%0A
+%0A
from dja
@@ -230,23 +230,8 @@
red%0A
-# Local imports
%0Afro
@@ -289,23 +289,8 @@
ta%0A%0A
-# Setup logging
%0Alog
@@ -324,16 +324,17 @@
ame__)%0A%0A
+%0A
def get_
|
4b335fbd082c34f631f903574bedb355f330fa63 | Update gamess.py for python 2.6 format | src/parser/gamess_us.py | src/parser/gamess_us.py | # __
# /__ _. ._ _ _ _ _ _
# \_| (_| | | | (/_ _> _> |_| _>
#
from src.parser_handler import get_dict_ele
import re
def parse_basis_data_gamess_us(data, name, des, elts, debug=False):
"""Parse the basis data raw html of gamess-us to get a nice tuple
Return (name, description, [[ele, data_ele],...])"""
basis_data = []
b = data.find("$DATA")
e = data.find("$END")
if (b == -1 or data.find("$DATA$END") != -1):
if debug:
print data
raise Exception("WARNING not DATA")
else:
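        # Normalise known GAMESS-US quirks: a misspelled element name and Fortran-style D exponents.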
dict_replace = {"PHOSPHOROUS": "PHOSPHORUS",
"D+": "E+",
"D-": "E-"}
for k, v in dict_replace.iteritems():
data = data.replace(k, v)
data = data[b + 5:e - 1].split('\n\n')
dict_ele = get_dict_ele()
for (elt, data_elt) in zip(elts, data):
elt_long_th = dict_ele[elt.lower()]
elt_long_exp = data_elt.split()[0].lower()
if "$" in data_elt:
if debug:
print "Eror",
raise Exception("WARNING bad split")
if elt_long_th == elt_long_exp:
basis_data.append([elt, data_elt.strip()])
else:
if debug:
print "th", elt_long_th
print "exp", elt_long_exp
print "abv", elt
raise Exception("WARNING not a good ELEMENT")
return (name, des, basis_data)
symmetry_regex = re.compile(ur'^(\w)\s+\d+\b')
def l_symmetry_gamess_us(atom_basis):
"""
Return the begin and the end of all the type of orbital
input: atom_basis = [name, S 1, 12 0.12 12212, ...]
output: [ [type, begin, end], ...]
"""
# Example
    # [[u'S', 1, 5], [u'L', 5, 9], [u'L', 9, 12], [u'D', 16, 18]]
l = []
for i, line in enumerate(atom_basis):
# Optimisation for not seaching all the time
if len(line) < 10:
m = re.search(symmetry_regex, line)
if m:
# Cause of L !
read_symmetry = m.group(1)
# L is real L or special SP
# Just check the number of exponant
if all([read_symmetry == "L",
len(atom_basis[i + 1].split()) == 4]):
real_symmetry = "SP"
else:
real_symmetry = read_symmetry
l.append([real_symmetry, i])
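                # Close the previous symmetry block (if any) by recording its end index.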
try:
l[-2].append(i)
except IndexError:
pass
l[-1].append(i + 1)
return l
def handle_l_gamess_us(l_atom_basis):
"""
Read l_atom_basis and change the SP in L and P
"""
l_data = []
for atom_basis in l_atom_basis:
# Split the data in line
l_line_raw = atom_basis.split("\n")
l_line = [l_line_raw[0]]
# l_line_raw[0] containt the name of the Atom
for symmetry, begin, end in l_symmetry_gamess_us(l_line_raw):
if symmetry == "SP":
body_s = []
body_p = []
for i_l in l_line_raw[begin + 1:end]:
# one L => S & P
a = i_l.split()
common = "{:>3}".format(a[0])
common += "{:>15.7f}".format(float(a[1]))
tail_s = common + "{:>23.7f}".format(float(a[2]))
body_s.append(tail_s)
tail_p = common + "{:>23.7f}".format(float(a[3]))
body_p.append(tail_p)
l_line += [l_line_raw[begin].replace("L", "S")]
l_line += body_s
l_line += [l_line_raw[begin].replace("L", "P")]
l_line += body_p
else:
l_line += l_line_raw[begin:end]
l_data.append("\n".join(l_line))
return l_data
def check_gamess(str_type):
"""Check is the orbital type is handle by gamess"""
assert len(str_type) == 1
if str_type in "S P D".split():
return True
elif str_type == "SP":
raise BaseException
else:
return True
| Python | 0.000001 | @@ -3294,16 +3294,17 @@
mon = %22%7B
+0
:%3E3%7D%22.fo
@@ -3346,16 +3346,17 @@
on += %22%7B
+0
:%3E15.7f%7D
@@ -3410,32 +3410,33 @@
_s = common + %22%7B
+0
:%3E23.7f%7D%22.format
@@ -3532,16 +3532,17 @@
mon + %22%7B
+0
:%3E23.7f%7D
|
b8cd1b6869651cd0cbe2cbeebc59c641f13e0e5b | Add todo for scopes permissions | polyaxon/scopes/permissions/scopes.py | polyaxon/scopes/permissions/scopes.py | from scopes.authentication.ephemeral import is_ephemeral_user
from scopes.authentication.internal import is_internal_user
from scopes.permissions.base import PolyaxonPermission
class ScopesPermission(PolyaxonPermission):
"""
Scopes based Permissions, depends on the authentication backend.
"""
ENTITY = None
SCOPE_MAPPING = None
@staticmethod
def _check_internal_or_ephemeral(request):
return any([is_ephemeral_user(request.user), is_internal_user(request.user)])
def has_permission(self, request, view):
if not request.auth:
if not request.user.is_authenticated:
return False
# Session users are granted total access
return True
if request.user.is_authenticated and request.user.is_superuser:
return True
allowed_scopes = set(self.SCOPE_MAPPING.get(request.method, []))
if not allowed_scopes:
return True
current_scopes = request.auth.scopes
return any(s in allowed_scopes for s in current_scopes)
| Python | 0 | @@ -720,32 +720,155 @@
return True%0A%0A
+ # TODO Add internal/ephemeral here%0A # (if that type of auth is allowed, then we should not check he scope)%0A%0A
if reque
|
ebacfc3ffe1cd1c9c58908c1f9dd78fe9eca9acd | fix for lambton not needed | ca_on_lambton/people.py | ca_on_lambton/people.py | from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx'
SGC = {
'St. Clair' : '3538003',
'Dawn-Euphemia' : '3538007',
'Brooke-Alvinston' : '3538015',
'Enniskillen' : '3538016',
'Oil Springs' : '3538018',
'Petrolia' : '3538019',
'Sarnia' : '3538030',
'Point Edward' : '3538031',
'Plympton-Wyoming' : '3538035',
'Lambton Shores' : '3538040',
'Warwick' : '3538043',
}
class LambtonPersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
councillors = page.xpath('//div[@id="WebPartWPQ1"]/table/tbody/tr[1]')
for councillor in councillors:
node = councillor.xpath('.//td[1]//strong//strong//strong//strong') or councillor.xpath('.//td[1]//strong')
text = node[0].text_content()
name = text.strip().replace('Deputy ', '').replace('Warden ', '').replace('Mayor', '')
role = text.replace(name, '').strip()
if not role:
role = 'Councillor'
if ',' in name:
name = name.split(',')[0].strip()
district = councillor.xpath('.//td[1]//p[contains(text(),",")]/text()')[0].split(',')[1].strip()
district = re.sub(r'\A(?:City|Municipality|Town|Township|Village) of\b| Township\Z', '', district)
p = Legislator(name=name, post_id=district, role=role)
p.add_source(COUNCIL_PAGE)
p.image = councillor.xpath('.//td[1]//img/@src')[0]
info = councillor.xpath('.//td[2]')[0].text_content()
residential_info = re.findall(r'(?<=Residence:)(.*)(?=Municipal Office:)', info, flags=re.DOTALL)[0]
self.get_contacts(residential_info, 'residence', p)
municipal_info = re.findall(r'(?<=Municipal Office:)(.*)', info, flags=re.DOTALL)[0]
self.get_contacts(municipal_info, 'legislature', p)
# Needed for Represent integration.
p.add_extra('sgc', SGC[district.strip()])
yield p
def get_contacts(self, text, note, councillor):
address = text.split('Telephone')[0]
text = text.replace(address, '').split(':')
for i, contact in enumerate(text):
if i == 0:
continue
contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext')
if '@' in contact:
contact = contact.strip()
else:
contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-')
if 'Fax' in contact_type:
councillor.add_contact('fax', contact, note)
elif 'Tel' in contact_type:
councillor.add_contact('voice', contact, note)
elif 'email' in contact_type:
councillor.add_contact('email', contact, None)
else:
councillor.add_contact(contact_type, contact, note)
| Python | 0 | @@ -224,393 +224,8 @@
x'%0A%0A
-SGC = %7B%0A 'St. Clair' : '3538003',%0A 'Dawn-Euphemia' : '3538007',%0A 'Brooke-Alvinston' : '3538015',%0A 'Enniskillen' : '3538016',%0A 'Oil Springs' : '3538018',%0A 'Petrolia' : '3538019',%0A 'Sarnia' : '3538030',%0A 'Point Edward' : '3538031',%0A 'Plympton-Wyoming' : '3538035',%0A 'Lambton Shores' : '3538040',%0A 'Warwick' : '3538043',%0A%7D%0A%0A
clas
@@ -1566,99 +1566,8 @@
p)%0A%0A
- # Needed for Represent integration.%0A p.add_extra('sgc', SGC%5Bdistrict.strip()%5D)%0A%0A
|
a8d701c75e4e0b880a7d3fedb6464aa875958f70 | Version 0.99.3 | __init__.py | __init__.py | # Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
pyUnRAR2 is a ctypes based wrapper around the free UnRAR.dll.
It is a modified version of Jimmy Retzlaff's pyUnRAR - simpler,
more stable and foolproof.
Note that it has an INCOMPATIBLE interface.
It enables reading and unpacking of archives created with the
RAR/WinRAR archivers. There is a low-level interface which is very
similar to the C interface provided by UnRAR. There is also a
higher level interface which makes some common operations easier.
"""
__version__ = '0.99.2'
try:
WindowsError
in_windows = True
except NameError:
in_windows = False
if in_windows:
from windows import RarFileImplementation
else:
from unix import RarFileImplementation
import fnmatch, time, weakref
class RarInfo(object):
"""Represents a file header in an archive. Don't instantiate directly.
Use only to obtain information about file.
YOU CANNOT EXTRACT FILE CONTENTS USING THIS OBJECT.
USE METHODS OF RarFile CLASS INSTEAD.
Properties:
index - index of file within the archive
filename - name of the file in the archive including path (if any)
datetime - file date/time as a struct_time suitable for time.strftime
isdir - True if the file is a directory
size - size in bytes of the uncompressed file
comment - comment associated with the file
Note - this is not currently intended to be a Python file-like object.
"""
def __init__(self, rarfile, data):
self.rarfile = weakref.proxy(rarfile)
self.index = data['index']
self.filename = data['filename']
self.isdir = data['isdir']
self.size = data['size']
self.datetime = data['datetime']
self.comment = data['comment']
def __str__(self):
try :
arcName = self.rarfile.archiveName
except ReferenceError:
arcName = "[ARCHIVE_NO_LONGER_LOADED]"
return '<RarInfo "%s" in "%s">' % (self.filename, arcName)
class RarFile(RarFileImplementation):
def __init__(self, archiveName, password=None):
"""Instantiate the archive.
archiveName is the name of the RAR file.
password is used to decrypt the files in the archive.
Properties:
comment - comment associated with the archive
>>> print RarFile('test.rar').comment
This is a test.
"""
self.archiveName = archiveName
RarFileImplementation.init(self, password)
def __del__(self):
self.destruct()
def infoiter(self):
"""Iterate over all the files in the archive, generating RarInfos.
>>> import os
>>> for fileInArchive in RarFile('test.rar').infoiter():
... print os.path.split(fileInArchive.filename)[-1],
... print fileInArchive.isdir,
... print fileInArchive.size,
... print fileInArchive.comment,
... print tuple(fileInArchive.datetime)[0:5],
... print time.strftime('%a, %d %b %Y %H:%M', fileInArchive.datetime)
test True 0 None (2003, 6, 30, 1, 59) Mon, 30 Jun 2003 01:59
test.txt False 20 None (2003, 6, 30, 2, 1) Mon, 30 Jun 2003 02:01
this.py False 1030 None (2002, 2, 8, 16, 47) Fri, 08 Feb 2002 16:47
"""
for params in RarFileImplementation.infoiter(self):
yield RarInfo(self, params)
def infolist(self):
"""Return a list of RarInfos, descripting the contents of the archive."""
return list(self.infoiter())
def read_files(self, condition='*'):
"""Read specific files from archive into memory.
If "condition" is a list of numbers, then return files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns boolean True (extract) or False (skip).
If "condition" is omitted, all files are returned.
Returns list of tuples (RarInfo info, str contents)
"""
checker = condition2checker(condition)
return RarFileImplementation.read_files(self, checker)
def extract(self, condition='*', path='.', withSubpath=True, overwrite=True):
"""Extract specific files from archive to disk.
If "condition" is a list of numbers, then extract files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns either boolean True (extract) or boolean False (skip).
DEPRECATED: If "condition" callback returns string (only supported for Windows) -
that string will be used as a new name to save the file under.
If "condition" is omitted, all files are extracted.
"path" is a directory to extract to
"withSubpath" flag denotes whether files are extracted with their full path in the archive.
"overwrite" flag denotes whether extracted files will overwrite old ones. Defaults to true.
Returns list of RarInfos for extracted files."""
checker = condition2checker(condition)
return RarFileImplementation.extract(self, checker, path, withSubpath, overwrite)
def condition2checker(condition):
"""Converts different condition types to callback"""
if type(condition) in [str, unicode]:
def smatcher(info):
return fnmatch.fnmatch(info.filename, condition)
return smatcher
elif type(condition) in [list, tuple] and type(condition[0]) in [int, long]:
def imatcher(info):
return info.index in condition
return imatcher
elif callable(condition):
return condition
else:
raise TypeError
| Python | 0 | @@ -1641,17 +1641,17 @@
= '0.99.
-2
+3
'%0D%0A%0D%0Atry
|
c202a3a945453a4955f0acbf369227f8c9cee148 | Rename link in init | __init__.py | __init__.py | import os
from .dataset import *
__path__ = [os.path.join(os.path.dirname(__file__), 'dataset')]
| Python | 0 | @@ -10,23 +10,25 @@
%0A%0Afrom .
-dataset
+batchflow
import
@@ -86,15 +86,17 @@
), '
-dataset
+batchflow
')%5D%0A
|
4a4731eda22170a77bb24dd3c7fc8ff4cafecf9d | bump version to 2.7b1 | __init__.py | __init__.py | """distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
__revision__ = "$Id$"
# Distutils version
#
# Updated automatically by the Python release process.
#
#--start constants--
__version__ = "2.7a4"
#--end constants--
| Python | 0 | @@ -311,10 +311,10 @@
%222.7
-a4
+b1
%22%0A#-
|
1ae0b75fb909b3fa1bd42702d4ab2a943a8f7155 | Version bump for 3.5.0b1. | __init__.py | __init__.py | """distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
# Distutils version
#
# Updated automatically by the Python release process.
#
#--start constants--
__version__ = "3.5.0a4"
#--end constants--
| Python | 0 | @@ -290,10 +290,10 @@
.5.0
-a4
+b1
%22%0A#-
|
bc43827ee733af9c37ca3b97b471ec1d2cde294b | Add unsubcribed handler to server. | echidna/server.py | echidna/server.py | import json
from cyclone.web import Application, RequestHandler, HTTPError
from cyclone.websocket import WebSocketHandler
from echidna.cards.memory_store import InMemoryCardStore
class EchidnaServer(Application):
def __init__(self, root, **settings):
self.store = InMemoryCardStore()
handlers = [
(r"/", root),
(r"/publish/(?P<channel>.*)/", PublicationHandler,
dict(store=self.store)),
(r"/subscribe", SubscriptionHandler,
dict(store=self.store)),
]
Application.__init__(self, handlers, **settings)
class PublicationHandler(RequestHandler):
def initialize(self, store):
self.store = store
def post(self, channel):
try:
channel = self.decode_argument(channel, "channel")
except:
raise HTTPError(400, "Invalid value for channel.")
try:
card = json.loads(self.request.body)
except:
raise HTTPError(400, "Invalid card in request body.")
self.store.publish(channel, card)
self.set_header("Content-Type", "application/json")
self.write(json.dumps({"success": True}))
class SubscriptionHandler(WebSocketHandler):
def initialize(self, store):
self.store = store
self.client = None
def _set_client(self, client):
self.client = client
def connectionMade(self, *args, **kw):
d = self.store.create_client(self.on_publish)
return d.addCallback(self._set_client)
def connectionLost(self, reason):
if self.client is not None:
return self.store.remove_client(self.client)
def messageReceived(self, msg):
try:
msg = json.loads(msg)
except:
return
if not isinstance(msg, dict):
return
msg_type = msg.get("msg_type", "invalid")
if not isinstance(msg_type, unicode):
return
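        # Dispatch to a handle_<msg_type> method (e.g. handle_subscribe), falling back to handle_invalid.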
handler = getattr(self, "handle_" + msg_type, self.handle_invalid)
handler(msg)
def on_publish(self, channel_name, card):
return self.send_card(channel_name, card)
def send_card(self, channel_name, card):
msg = {
"msg_type": "card",
"channel": channel_name,
"card": card,
}
self.sendMessage(json.dumps(msg))
def send_error(self, reason, **data):
msg = {
"msg_type": "error",
"reason": reason,
}
msg.update(data)
self.sendMessage(json.dumps(msg))
def send_cards(self, channel_name, cards):
for card in cards:
self.on_publish(channel_name, card)
def handle_subscribe(self, msg):
channel_name = msg.get("channel")
if not isinstance(channel_name, unicode):
return
d = self.store.subscribe(channel_name, self.client)
return d.addCallback(
lambda cards: self.send_cards(channel_name, cards))
def handle_invalid(self, msg):
self.send_error("invalid message", original_message=msg)
| Python | 0 | @@ -2971,24 +2971,255 @@
e, cards))%0A%0A
+ def handle_unsubscribed(self, msg):%0A channel_name = msg.get(%22channel%22)%0A if not isinstance(channel_name, unicode):%0A return%0A d = self.store.unsubscribe(channel_name, self.client)%0A return d%0A%0A
def hand
|
9940212f5d0cf4860d0dc092dc55031218de490b | Fix test return type | gym/monitoring/tests/test_monitor.py | gym/monitoring/tests/test_monitor.py | import glob
import os
import gym
from gym import error, spaces
from gym import monitoring
from gym.monitoring import monitor
from gym.monitoring.tests import helpers
class FakeEnv(gym.Env):
def _render(self, close=True):
raise RuntimeError('Raising')
def test_monitor_filename():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp)
env.monitor.close()
manifests = glob.glob(os.path.join(temp, '*.manifest.*'))
assert len(manifests) == 1
def test_write_upon_reset_false():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp, video_callable=False, write_upon_reset=False)
env.reset()
files = glob.glob(os.path.join(temp, '*'))
assert not files, "Files: {}".format(files)
env.monitor.close()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0
def test_write_upon_reset_true():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp, video_callable=False, write_upon_reset=True)
env.reset()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0, "Files: {}".format(files)
env.monitor.close()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0
def test_close_monitor():
with helpers.tempdir() as temp:
env = FakeEnv()
env.monitor.start(temp)
env.monitor.close()
manifests = monitor.detect_training_manifests(temp)
assert len(manifests) == 1
def test_video_callable_true_not_allowed():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
try:
env.monitor.start(temp, video_callable=True)
except error.Error:
pass
else:
assert False
def test_video_callable_false_does_not_record():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp, video_callable=False)
env.reset()
env.monitor.close()
results = monitoring.load_results(temp)
assert len(results['videos']) == 0
def test_video_callable_records_videos():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp)
env.reset()
env.monitor.close()
results = monitoring.load_results(temp)
assert len(results['videos']) == 1, "Videos: {}".format(results['videos'])
def test_env_reuse():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env.monitor.start(temp)
env.monitor.close()
env.monitor.start(temp, force=True)
env.reset()
env.step(env.action_space.sample())
env.step(env.action_space.sample())
env.monitor.close()
results = monitor.load_results(temp)
assert results['episode_lengths'] == [2], 'Results: {}'.format(results)
class AutoresetEnv(gym.Env):
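    # Minimal env that declares itself auto-resetting via the semantics.autoreset metadata key.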
metadata = {'semantics.autoreset': True}
def __init__(self):
self.action_space = spaces.Discrete(1)
self.observation_space = spaces.Discrete(1)
def _reset(self):
return None
def _step(self, action):
return None, 0, False, {}
gym.envs.register(
id='Autoreset-v0',
entry_point='gym.monitoring.tests.test_monitor:AutoresetEnv',
timestep_limit=2,
)
def test_autoreset_env():
with helpers.tempdir() as temp:
env = gym.make('Autoreset-v0')
env.monitor.start(temp)
env.reset()
env.step(None)
_, _, done, _ = env.step(None)
assert done
env.step(None)
_, _, done, _ = env.step(None)
assert done
| Python | 0.000032 | @@ -3233,20 +3233,17 @@
return
-None
+0
%0A%0A de
@@ -3280,20 +3280,17 @@
return
-None
+0
, 0, Fal
|
9d1317231a1c5d62fddf130c78e3942a08651d42 | Fix for numpy linspace but where count must be an integer | geophys_utils/_transect_utils.py | geophys_utils/_transect_utils.py | #!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 23Nov.,2016
@author: u76345
'''
import numpy as np
import math
from ._crs_utils import get_utm_wkt, transform_coords
def line_length(line):
'''
Function to return length of line
@param line: iterable containing two two-ordinate iterables, e.g. 2 x 2 array or 2-tuple of 2-tuples
@return length: Distance between start & end points in native units
'''
return math.sqrt(math.pow(line[1][0] - line[0][0], 2.0) +
math.pow(line[1][1] - line[0][1], 2.0))
def point_along_line(line, distance):
'''
Function to return a point the specified distance along the line
@param line: iterable containing two two-ordinate iterables, e.g. 2 x 2 array or 2-tuple of 2-tuples
@param distance: Distance along line new point should be
@return point: Coordinates of point along line or None if distance > line length
'''
length = line_length(line)
proportion = distance / length
if proportion < 0 or proportion > 1:
return None
return tuple([line[0][dim_index] + proportion *
(line[1][dim_index] - line[0][dim_index]) for dim_index in range(2)])
def utm_coords(coordinate_array, wkt):
'''
Function to convert coordinates to the appropriate UTM CRS
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
    @param wkt: WKT for the CRS of the input coordinates
    @return utm_wkt: WKT for the UTM CRS
@return coordinate_array: Array of shape (n, 2) containing UTM coordinate pairs
'''
native_centre_coords = (np.nanmean(coordinate_array[:,0]), np.nanmean(coordinate_array[:,1]))
utm_wkt = get_utm_wkt(native_centre_coords, wkt)
return utm_wkt, np.array(transform_coords(coordinate_array, wkt, utm_wkt))
def coords2distance(coordinate_array):
'''
Function to calculate cumulative distance in metres from native (lon/lat) coordinates
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
@return distance_array: Array of shape (n) containing cumulative distances from first coord
'''
coord_count = coordinate_array.shape[0]
distance_array = np.zeros((coord_count,), coordinate_array.dtype)
cumulative_distance = 0.0
distance_array[0] = cumulative_distance
last_point = coordinate_array[0]
for coord_index in range(1, coord_count):
point = coordinate_array[coord_index]
        distance = line_length((point, last_point))
cumulative_distance += distance
distance_array[coord_index] = cumulative_distance
last_point = point
return distance_array
def sample_transect(transect_vertices, wkt, sample_metres):
'''
Function to return a list of sample points sample_metres apart along lines between transect vertices
@param transect_vertices: list or array of transect vertex coordinates
@param wkt: coordinate reference system for transect_vertices
@param sample_metres: distance between sample points in metres
'''
transect_vertex_array = np.array(transect_vertices)
# print 'transect_vertex_array = %s' % transect_vertex_array
nominal_utm_wkt, utm_transect_vertices = utm_coords(transect_vertex_array, wkt)
# print 'nominal_utm_wkt = %s' % nominal_utm_wkt
# print 'utm_transect_vertices = %s' % utm_transect_vertices
sample_points = []
residual = 0
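    # residual carries the distance left over from the previous line segment so that
    # consecutive sample points stay sample_metres apart across vertices.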
for vertex_index in range(len(utm_transect_vertices) - 1):
utm_line = (utm_transect_vertices[
vertex_index], utm_transect_vertices[vertex_index + 1])
# print 'utm_line = %s' % (utm_line,)
utm_line_length = line_length(utm_line)
# print 'utm_line_length = %s' % utm_line_length
# Skip lines of infinite length
if utm_line_length == float('inf'):
continue
sample_count = (utm_line_length + residual) // sample_metres
# print 'sample_count = %s' % sample_count
if not sample_count:
residual += utm_line_length
continue
if residual: # Use un-sampled distance from last line
start_point = point_along_line(
utm_line, sample_metres - residual)
else:
start_point = utm_line[0] # Start at beginning
# print 'start_point = %s' % (start_point,)
# Calculate new residual
residual = (utm_line_length + residual) % sample_metres
# print 'residual = %s' % residual
end_point = point_along_line(utm_line, utm_line_length - residual)
# print 'end_point = %s' % (end_point,)
try:
sample_point_array = np.stack([np.linspace(start_point[dim_index], end_point[
dim_index], sample_count + 1) for dim_index in range(2)]).transpose()
# print 'sample_point_array.shape = %s' %
# (sample_point_array.shape,)
except Exception as e:
print('Line sampling failed: {}'.format(e))
residual = 0
continue
sample_points += list(sample_point_array)
# Don't double up end point with next start point
if (not residual) and (vertex_index <
len(utm_transect_vertices) - 1):
sample_points.pop()
return transform_coords(
sample_points, nominal_utm_wkt, wkt), sample_metres
| Python | 0.000019 | @@ -4739,16 +4739,20 @@
count =
+int(
(utm_lin
@@ -4780,32 +4780,33 @@
// sample_metres
+)
%0A # print
|
86eb16da4a6c3579eb514fa5ca73def7be8afd84 | Add noqa codestyle | geotrek/api/v2/views/__init__.py | geotrek/api/v2/views/__init__.py | from rest_framework import response, permissions
from rest_framework.views import APIView
from django.conf import settings
from django.contrib.gis.geos import Polygon
from .authent import StructureViewSet # noqa
from .common import TargetPortalViewSet, ThemeViewSet, SourceViewSet, ReservationSystemViewSet, LabelViewSet, OrganismViewSet # noqa
if 'geotrek.core' in settings.INSTALLED_APPS:
from .core import PathViewSet # noqa
if 'geotrek.feedback' in settings.INSTALLED_APPS:
from .feedback import ReportStatusViewSet, ReportActivityViewSet, ReportCategoryViewSet, ReportProblemMagnitudeViewSet # noqa
if 'geotrek.trekking' in settings.INSTALLED_APPS:
from .trekking import (TrekViewSet, TourViewSet, POIViewSet, POITypeViewSet, AccessibilityViewSet, RouteViewSet,
DifficultyViewSet, NetworkViewSet, PracticeViewSet,
WebLinkCategoryViewSet, ServiceTypeViewSet, ServiceViewSet, TrekRatingScaleViewSet, TrekRatingViewSet) # noqa
if 'geotrek.sensitivity' in settings.INSTALLED_APPS:
from .sensitivity import SensitiveAreaViewSet # noqa
from .sensitivity import SportPracticeViewSet # noqa
from .sensitivity import SpeciesViewSet # noqa
if 'geotrek.tourism' in settings.INSTALLED_APPS:
from .tourism import TouristicContentViewSet, TouristicEventViewSet, TouristicEventTypeViewSet, InformationDeskViewSet, TouristicContentCategoryViewSet # noqa
if 'geotrek.zoning' in settings.INSTALLED_APPS:
from .zoning import CityViewSet, DistrictViewSet # noqa
if 'geotrek.outdoor' in settings.INSTALLED_APPS:
from .outdoor import (SiteViewSet, OutdoorPracticeViewSet, SiteTypeViewSet, CourseTypeViewSet,
OutdoorRatingScaleViewSet, OutdoorRatingViewSet, CourseViewSet, SectorViewSet) # noqa
if 'geotrek.flatpages' in settings.INSTALLED_APPS:
from .flatpages import FlatPageViewSet # noqa
if 'geotrek.infrastructure' in settings.INSTALLED_APPS:
from .infrastructure import InfrastructureTypeViewSet, InfrastructureViewSet, InfrastructureUsageDifficultyLevelViewSet, InfrastructureConditionViewSet, InfrastructureMaintenanceDifficultyLevelViewSet # noqa
if 'geotrek.signage' in settings.INSTALLED_APPS:
from .signage import SignageViewSet, SignageTypeViewSet, SealingViewSet, ColorViewSet, DirectionViewSet, BladeTypeViewSet # noqa
if 'drf_yasg' in settings.INSTALLED_APPS:
from .swagger import schema_view # noqa
class ConfigView(APIView):
"""
Configuration endpoint that gives the BBox used in the Geotrek configuration
"""
permission_classes = [permissions.AllowAny, ]
def get(self, request, *args, **kwargs):
bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
bbox.srid = settings.SRID
bbox.transform(settings.API_SRID)
return response.Response({
'bbox': bbox.extent
})
| Python | 0 | @@ -768,32 +768,40 @@
t, RouteViewSet,
+ # noqa
%0A
@@ -855,32 +855,40 @@
PracticeViewSet,
+ # noqa
%0A
@@ -1699,16 +1699,24 @@
ViewSet,
+ # noqa
%0A
|
ad73ef8e4433645cd72bc92439c4d07c5d1f6455 | Add test for radian ticks | astropy/visualization/tests/test_units.py | astropy/visualization/tests/test_units.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_PLT
if HAS_PLT:
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
def teardown_function(function):
plt.close('all')
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_units_errbarr():
pytest.importorskip("matplotlib")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
@pytest.mark.skipif('not HAS_PLT')
def test_quantity_subclass():
"""Check that subclasses are recognized.
This sadly is not done by matplotlib.units itself, though
there is a PR to change it:
https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
@pytest.mark.skipif('not HAS_PLT')
def test_empty_hist():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist([1, 2, 3, 4] * u.mmag, bins=100)
# The second call results in an empty list being passed to the
# unit converter in matplotlib >= 3.1
ax.hist([] * u.mmag, bins=100)
| Python | 0 | @@ -210,16 +210,35 @@
t as plt
+%0Aimport numpy as np
%0A%0Afrom a
@@ -3521,28 +3521,379 @@
hist(%5B%5D * u.mmag, bins=100)%0A
+%0A%0A@pytest.mark.skipif('not HAS_PLT')%0Adef test_radian_formatter():%0A with quantity_support():%0A fig, ax = plt.subplots()%0A ax.plot(%5B1, 2, 3%5D, %5B1, 2, 3%5D * u.rad * np.pi)%0A fig.canvas.draw()%0A labels = %5Btl.get_text() for tl in ax.yaxis.get_ticklabels()%5D%0A assert labels == %5B'%CF%80/2', '%CF%80', '3%CF%80/2', '2%CF%80', '5%CF%80/2', '3%CF%80', '7%CF%80/2'%5D%0A
|
30baad1c058a3e929c88b7e10799031e0a0b4bf6 | Fix a typo in rebaseeditor comment | git_upstream/lib/rebaseeditor.py | git_upstream/lib/rebaseeditor.py | #
# Copyright (c) 2012, 2013, 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from git_upstream.lib.utils import GitMixin
from git_upstream.log import LogDedentMixin
from subprocess import call
import os
REBASE_EDITOR_SCRIPT = "rebase-editor"
# ensure name of file will match any naming filters used by editors to
# enable syntax highlighting
REBASE_EDITOR_TODO = "git-upstream/git-rebase-todo"
TODO_EPILOGUE = """\
# Rebase %(shortrevisions)s onto %(shortonto)s
#
# All commands from normal rebase instructions files are supported
#
# If you remove a line, that commit will be dropped.
# Removing all commits will abort the rebase.
#
"""
class RebaseEditor(GitMixin, LogDedentMixin):
def __init__(self, interactive=False, *args, **kwargs):
self._interactive = interactive
super(RebaseEditor, self).__init__()
self._editor = REBASE_EDITOR_SCRIPT
# interactive switch here determines if the script that is given
        # to git-rebase to run as its editor, will in turn exec an editor
# for the user to look through the instructions before rebase
# applies them
if interactive == 'debug':
self.log.debug("Enabling interactive mode for rebase")
self._editor = "%s --interactive" % self.editor
@property
def editor(self):
return self._editor
def _write_todo(self, commits, *args, **kwargs):
todo_file = os.path.join(self.repo.git_dir, REBASE_EDITOR_TODO)
if os.path.exists(todo_file):
os.remove(todo_file)
if not os.path.exists(os.path.dirname(todo_file)):
os.mkdir(os.path.dirname(todo_file))
# see if onto is set in the args or kwargs
onto = kwargs.get('onto', None)
for idx, arg in enumerate(args):
if arg.startswith("--onto"):
# either onto is after the option in this arg, or it's the
# next arg, or not providing it is an exception
onto = arg[7:] or args[idx + 1]
break
root = None
with open(todo_file, "w") as todo:
for commit in commits:
if not root:
root = commit.parents[0].hexsha
subject = commit.message.splitlines()[0]
todo.write("pick %s %s\n" % (commit.hexsha[:7], subject))
# if root isn't set at this point, then there were no commits
if not root:
todo.write("noop\n")
todo.write(TODO_EPILOGUE %
{'shortrevisions': self._short_revisions(root,
commit.hexsha),
'shortonto': self._short_onto(onto or root)})
return todo_file
def _short_revisions(self, root, commit):
if not root:
return "<none>"
return "%s..%s" % (root[:7], commit[:7])
def _short_onto(self, onto):
if not onto:
return "<none>"
return self.git.rev_parse(onto)[:7]
def _set_editor(self, editor):
if self.git_sequence_editor:
self._saveeditor = self.git_sequence_editor
if self._interactive == 'debug':
os.environ['GIT_UPSTREAM_GIT_SEQUENCE_EDITOR'] = \
self._saveeditor
os.environ['GIT_SEQUENCE_EDITOR'] = editor
else:
self._saveeditor = self.git_editor
if self._interactive == 'debug':
os.environ['GIT_UPSTREAM_GIT_EDITOR'] = self._saveeditor
os.environ['GIT_EDITOR'] = editor
def _unset_editor(self):
for var in ['GIT_SEQUENCE_EDITOR', 'GIT_EDITOR']:
# GIT_UPSTREAM_* variations should only be set if script was in a
# debug mode.
if os.environ.get('GIT_UPSTREAM_' + var, None):
del os.environ['GIT_UPSTREAM_' + var]
# Restore previous editor only if the environment var is set. This
# isn't perfect since we should probably unset the env var if it
# wasn't previously set, but this shouldn't cause any problems.
if os.environ.get(var, None):
os.environ[var] = self._saveeditor
break
def run(self, commits, *args, **kwargs):
"""
        Reads the list of commits given, and constructs the instructions
file to be used by rebase.
Will spawn an editor if the constructor was told to be interactive.
Additional arguments *args and **kwargs are to be passed to 'git
rebase'.
"""
todo_file = self._write_todo(commits, *args, **kwargs)
if self._interactive:
# spawn the editor
user_editor = self.git_sequence_editor or self.git_editor
status = call("%s %s" % (user_editor, todo_file), shell=True)
if status:
return status, None, "Editor returned non-zero exit code"
editor = "%s %s" % (self.editor, todo_file)
self._set_editor(editor)
try:
if self._interactive == 'debug':
# In general it's not recommended to run rebase in direct
# interactive mode because it's not possible to capture the
# stdout/stderr, but sometimes it's useful to allow it for
# debugging to check the final result.
#
# It is not safe to redirect I/O channels as most editors will
# be expecting that I/O is from/to proper terminal. YMMV
cmd = ['git', 'rebase', '--interactive']
cmd.extend(self.git.transform_kwargs(**kwargs))
cmd.extend(args)
return call(cmd), None, None
else:
return self.git.rebase(interactive=True, with_exceptions=False,
with_extended_output=True, *args,
**kwargs)
finally:
os.remove(todo_file)
# make sure to remove the environment tweaks added so as not to
# impact any subsequent use of git commands using editors
self._unset_editor()
@property
def git_sequence_editor(self):
return os.environ.get('GIT_SEQUENCE_EDITOR',
self.git.config("sequence.editor",
with_exceptions=False))
@property
def git_editor(self):
return os.environ.get("GIT_EDITOR", self.git.var("GIT_EDITOR"))
| Python | 0.008335 | @@ -790,17 +790,17 @@
tor%22%0A%0A#
-i
+e
nsure na
|
dded8beb4a075dfc44938d5355727cc4058ba80b | Fix typo | athenet/data_loader/data_loader_buffer.py | athenet/data_loader/data_loader_buffer.py | """Buffer for storing large network data."""
import numpy as np
import theano
class Buffer(object):
"""Buffer storing data from contiguous subsequence of minibatches.
Content of a buffer is a 4-dimensional floating-point tensor.
"""
def __init__(self, data_loader=None):
"""Create data Buffer.
:data_loader: Instance of DataLoader that will be using Buffer.
"""
self.begin = -1
self.end = 0
self.offset = theano.shared(0)
self.parent = data_loader
        # Create a 4-dimensional tensor shared variable for data. Exact size of
# the tensor is determined when data is set, and can change over time.
self._data = theano.shared(
np.zeros((1, 1, 1, 1), dtype=theano.config.floatX),
borrow=True)
@property
def data(self):
"""Shared variable representing data stored in a buffer."""
return self._data
def __getitem__(self, key):
"""Return minibatches of given indices.
        Returned data is taken from the data array; however, key represents a
        minibatch index, not a direct index into the data array. Effectively,
        the buffer can be used as if it contained all of the minibatch data.

        Parent must be set before using this method, as minibatch size is
        needed to determine the shift that has to be used in the data array.
:key: Symbolic index or slice representing indices of minibatches to
return.
:return: Minibatches data.
"""
shift = self.offset * self.parent.batch_size
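        # Translate the caller's global minibatch index into an index that is
        # local to the slice of data currently held in this buffer.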
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
return self._data[start-shift:stop-shift:step]
else:
return self._data[key-shift]
def set(self, data, batch_index=None, n_of_batches=None):
"""Set buffer data.
:data: Data to be stored in a buffer.
:batch_index: Index of first minibatch that is contained in given
data.
:n_of_batches: Number of minibatches that are contained in given data.
"""
if batch_index:
self.begin = batch_index
self.offset.set_value(batch_index)
if n_of_batches:
self.end = batch_index + n_of_batches
self._data.set_value(
np.asarray(np.concatenate(data, axis=0),
dtype=theano.config.floatX),
borrow=True)
def contains(self, batch_index):
"""Check if minibatch is contained in a buffer.
:batch_index: Index of a minibatch.
:return: True, if minibatch of a given index is contained in a buffer.
False otherwise.
"""
return batch_index >= self.begin and batch_index < self.end
| Python | 0.999999 | @@ -1355,17 +1355,17 @@
o be use
-s
+d
in data
|
edc8761296ad65330e51e98a6cc602dc2e9033b6 | def __unicode__ for Document | deputies/models.py | deputies/models.py | # lachambre.be to json sausage machine
# Copyright (C) 2011 Laurent Peuch <cortex@worlddomination.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from json import dumps
from django.db import models
from djangotoolbox.fields import ListField, EmbeddedModelField
class Jsonify(object):
def json(self):
return dumps(self.__class__.objects.filter(pk=self.pk).values()[0], indent=4)
class Deputy(models.Model, Jsonify):
full_name = models.CharField(max_length=1337, unique=True)
first_name = models.CharField(max_length=1337)
last_name = models.CharField(max_length=1337)
sex = models.CharField(max_length=1337, null=True)
emails = ListField()
party = models.ForeignKey('Party')
url = models.CharField(max_length=1337)
websites = ListField()
lachambre_id = models.CharField(max_length=1337, unique=True)
language = models.CharField(max_length=1337, null=True)
cv = models.CharField(max_length=1337)
commissions = ListField(EmbeddedModelField('CommissionMembership'))
documents_principal_author_url = models.URLField()
documents_principal_author_list = ListField(EmbeddedModelField('Document'))
documents_principal_signator_url = models.URLField()
documents_principal_signator_list = ListField(EmbeddedModelField('Document'))
documents_next_author_url = models.URLField()
documents_next_author_list = ListField(EmbeddedModelField('Document'))
documents_next_signator_url = models.URLField()
documents_next_signator_list = ListField(EmbeddedModelField('Document'))
documents_rapporter_url = models.URLField()
documents_rapporter_list = ListField(EmbeddedModelField('Document'))
questions_written_url = models.URLField()
questions_written_list = ListField(EmbeddedModelField('WrittenQuestion'))
questions_oral_plenary_url = models.URLField()
questions_oral_plenary_list = ListField(EmbeddedModelField('Question'))
questions_oral_commission_url = models.URLField()
questions_oral_commission_list = ListField(EmbeddedModelField('Question'))
def __unicode__(self):
return '%s - %s' % (self.full_name, self.party)
class Party(models.Model, Jsonify):
name = models.CharField(max_length=1337)
url = models.URLField()
def __unicode__(self):
return self.name
class CommissionMembership(models.Model, Jsonify):
name = models.CharField(max_length=1337)
role = models.CharField(max_length=1337)
url = models.URLField()
commission = models.ForeignKey('Commission')
class Commission(models.Model, Jsonify):
lachambre_id = models.IntegerField(unique=True)
class Document(models.Model, Jsonify):
title = models.CharField(max_length=1337)
url = models.CharField(max_length=1337)
status_chambre = models.CharField(max_length=1337, null=True)
status_senat = models.CharField(max_length=1337, null=True)
eurovoc_main_descriptor = models.CharField(max_length=1337, null=True)
date = models.CharField(max_length=1337, null=True)
eurovoc_descriptors = ListField()
keywords = ListField()
lachambre_id = models.IntegerField(unique=True)
class WrittenQuestion(models.Model, Jsonify):
title = models.CharField(max_length=1337)
departement = models.CharField(max_length=1337, )
eurovoc_descriptors = ListField()
deposition_date = models.CharField(max_length=1337, )
delay_date = models.CharField(max_length=1337, null=True)
keywords = ListField()
url = models.URLField()
lachambre_id = models.CharField(max_length=1337)
class Question(models.Model, Jsonify):
title = models.CharField(max_length=1337)
reunion_type = models.CharField(max_length=1337, null=True)
reunion_date = models.CharField(max_length=1337, null=True)
session_id = models.CharField(max_length=1337, )
eurovoc_descriptors = ListField()
keywords = ListField()
pdf_url = models.URLField(null=True)
url = models.URLField()
type = models.CharField(max_length=1337)
lachambre_id = models.CharField(max_length=1337)
class Analysis(models.Model, Jsonify):
title = models.CharField(max_length=1337)
descriptor = models.CharField(max_length=1337)
url = models.URLField()
type = models.CharField(max_length=1337)
lachambre_id = models.CharField(max_length=1337)
| Python | 0.999998 | @@ -3732,24 +3732,112 @@
ique=True)%0A%0A
+ def __unicode__(self):%0A return %22%25s - %25s%22 %25 (self.lachambre_id, self.title)%0A%0A%0A
class Writte
|
e94e7ae0f52ca0f566127ac824a7471751f16924 | version 0.5.3.1 | api/info.py | api/info.py | from collections import OrderedDict
from rest_framework import viewsets, mixins, response, reverse
NAME = 'vsemionov.notes.api'
VERSION = '0.5.3'
class InfoViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
view_name = 'Info'
@staticmethod
def _get_user_url(request):
return request.user.id and reverse.reverse('user-detail', request=request, args=[request.user.username])
def get_view_name(self):
return self.view_name
def list(self, request, *args, **kwargs):
app = OrderedDict((('name', NAME),
('version', VERSION)))
user = OrderedDict((('username', request.user.username),
('url', self._get_user_url(request))))
info = OrderedDict((('app', app),
('user', user)))
return response.Response(info)
| Python | 0.000003 | @@ -139,16 +139,18 @@
= '0.5.3
+.1
'%0A%0A%0Aclas
|
d6d70ed4e27b0e43536ea0e55321189a19bd146f | add graphene debug middleware | web/settings.py | web/settings.py | """
Django settings for doublefault project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!=npx8on^!h4lw%4ml1t($b4pdty2!g7@4v+^q0(hle@w+55!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web.apps.WebConfig',
'social_django',
'graphene_django'
]
GRAPHENE = {
'SCHEMA': 'doublefault.schema.schema'
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'doublefault',
'USER': 'doublefault',
'PASSWORD': 'doublefault',
'HOST': 'df-postgres'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'social_core.backends.google.GoogleOAuth2',
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ['GOOGLE_OAUTH_KEY']
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ['GOOGLE_OAUTH_SECRET']
| Python | 0.000002 | @@ -1193,16 +1193,98 @@
.schema'
+,%0A 'MIDDLEWARE': %5B%0A 'graphene_django.debug.DjangoDebugMiddleware',%0A %5D
%0A%7D%0A%0AMIDD
|
76f59a0be5fd8321d71fe5a7deb223daac2a5181 | version 0.5.17 | api/info.py | api/info.py | from collections import OrderedDict
from rest_framework import viewsets, mixins, response, reverse
NAME = 'vsemionov.boomerang.api'
VERSION = '0.5.16'
class ApiInfoViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
view_name = 'Api Info'
@staticmethod
def _get_user_url(request):
return request.user.id and reverse.reverse('user-detail', request=request, args=[request.user.username])
def get_view_name(self):
return self.view_name
def list(self, request, *args, **kwargs):
app = OrderedDict((('name', NAME),
('version', VERSION)))
user = OrderedDict((('username', request.user.username),
('url', self._get_user_url(request))))
info = OrderedDict((('app', app),
('user', user)))
return response.Response(info)
| Python | 0.000001 | @@ -147,9 +147,9 @@
.5.1
-6
+7
'%0A%0A%0A
|
a962f1e0aced277e673eddc6b70e316bba482f24 | fix typo | api/mail.py | api/mail.py | from flask import Flask, render_template
from api import app
from api.models import User, Invites, Reset
from flask_mail import Mail
from flask_mail import Message
app.config.update(
MAIL_SERVER = 'smtp.yandex.com',
MAIL_PORT = 465,
MAIL_USE_SSL = True ,
MAIL_USERNAME = 'cross-apps@yandex.com',
MAIL_PASSWORD = 'innovativeproject',
)
mail = Mail(app)
def send_email(subject, sender, recipients, html_body):
"""
    Sends email of given subject, sender, recipients (array) and html template.
"""
msg = Message(subject=subject, sender=sender, recipients=recipients)
msg.html = html_body
mail.send(msg)
def send_email_register(sender,recip):
"""
User invitation email.
"""
email = recip[0]
username = email.split('@')[0]
admin = sender.split('@')[0]
new = Invites.query.filter_by(email = email).first()
url = 'https://cross-app-links.herokuapp.com/api/auth/setpassword?token=' + str(new.token)
subject = "Cross-apps registration"
headerText = "You've received an invitation!"
freeText = "Administrator has invited you to join Cross-apps shortcuts!"
    userTextBold = "You can complete your registration by clicking the button or entering the link. \n Set up your unique password and make yourself at home!"
userText = ""
send_email(subject,
'cross-apps@yandex.com',
recip,
               render_template("email_template.html",
user=username,
sender=admin,
url=url,
subject=subject,
buttonText="Register",
headerText=headerText,
freeText=freeText,
userTextBold=userTextBold,
userText=userText))
def send_email_reset(email):
"""
User password reset email.
"""
    recipient = email[0]
    username = recipient.split('@')[0]
    new = Reset.query.filter_by(email = recipient).first()
url = 'https://cross-app-links.herokuapp.com/api/auth/setnewpassword?token=' + str(new.token)
subject = "Cross-apps password reset"
headerText = "Looks like you want to reset your password!"
freeText = "Here we send you instructions to set up a new password for your account!"
    userTextBold = "Please proceed by clicking the button. \n You will be shown a page that will allow you to set a new password."
userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging."
send_email(subject,
'cross-apps@yandex.com',
email,
render_template("email_template.html",
user=username,
sender="system",
url=url,
subject=subject,
buttonText="RESET",
headerText=headerText,
freeText=freeText,
userTextBold=userTextBold,
userText=userText)) | Python | 0.998939 | @@ -1414,14 +1414,8 @@
ail_
-reset_
temp
|
f9a1da6e60bfbd9c9e5be769f1223d628cec6481 | set the module version | base_external_referentials/__openerp__.py | base_external_referentials/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Akretion (<http://www.akretion.com>). All Rights Reserved
# authors: Raphaël Valyi, Sharoon Thomas
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base External Referentials',
'version': '1.0',
'category': 'Generic Modules/Base',
'description': """
Definition : a referential is an external system that will interact with OpenERP
Goal : store external system connection details and object field mappings
This module provides an abstract common minimal base to add any additional external id columns
to some OpenObject table, pointing to some external referential.
A referential is abstract and minimal at this stage, it only has:
* a name
* a location (possibly webservice URL, database connection URL...); the connection method will tell it...
* referential credentials (user name + password)
* placeholders for custom in and out mapping for OpenERP object fields.
OpenERP already has limited support for external ids using the ir_model_data and the id
fields in the loaded data such as XML or CSV. We think that's OK to store all referential ids
into the same ir_model_data table: yes it makes it large, but synchronisation operations involve
a network bottleneck anyway, so it's largely OK and negligible to have a large table here.
The existing ir_model_data feature of OpenERP is mostly thought of as a mono-external referential
(even if the module key of ir_model_data plays some referential scoping role). Here we just push
the concept further to assume multiple external ids for OpenERP entities and add the possibility
to customize their field mapping directly in OpenERP to accommodate the external systems.
""",
'author': 'Raphaël Valyi (Akretion.com), Sharoon Thomas (Openlabs.co.in)',
'website': 'http://www.akretion.com, http://openlabs.co.in/',
'depends': ['base','base_pop_up', 'base_file_protocole', 'email_template'],
'init_xml': [],
'update_xml': [
'external_referentials_view.xml',
'report_view.xml',
'external_referentials_menu.xml',
'security/ir.model.access.csv',
'group_fields_view.xml',
'security/base_external_referentials_security.xml',
'report_mail_template.xml',
],
'demo_xml': [],
'installable': True,
'certificate': '',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 | @@ -1081,16 +1081,18 @@
sion': '
+6.
1.0',%0A
|
6eeb2b4f79c2f735552cf7c061b48425d3299e51 | Use argparse. | validate_equajson.py | validate_equajson.py | #! /usr/bin/env python3
import json
import jsonschema
import sys
import os
def main(equajson_path, schema_path):
global filepath
filepath = equajson_path
with open(schema_path) as schema_file:
try:
equajson_schema = json.load(schema_file)
except:
sys.stderr.write("Invalid JSON in schema: `"+schema_file.name+"'"+'\n')
raise
with open(equajson_path) as json_file:
try:
equajson = json.load(json_file)
except:
sys.stderr.write("Invalid JSON in file: `"+json_file.name+"'"+'\n')
raise
try:
jsonschema.validate(equajson, equajson_schema)
except jsonschema.exceptions.ValidationError:
sys.stderr.write(json_file.name+'\n')
raise
basename_no_extension = os.path.splitext(os.path.basename(json_file.name))[0]
# It's easier to make this a global variable
# than to thread it through every function.
filepath = None
if __name__ == '__main__':
num_args = len(sys.argv) - 1
if num_args != 2:
sys.stderr.write("Usage: python "+sys.argv[0]+" equajson.json schema.json"+'\n')
sys.exit(1)
main(sys.argv[1], sys.argv[2])
| Python | 0.000001 | @@ -67,16 +67,32 @@
mport os
+%0Aimport argparse
%0A%0Adef ma
@@ -1103,199 +1103,365 @@
-num_args = len(sys.argv) - 1%0A if num_args != 2:%0A sys.stderr.write(%22Usage: python %22+sys.argv%5B0%5D+%22 equajson.json schema.json%22+'%5Cn')
+parser = argparse.ArgumentParser(description='validate equajson files')%0A parser.add_argument(%0A '-s',%0A '--schema',%0A help='path to schema file',%0A required=True%0A )%0A parser.add_argument(%0A 'json_file',%0A help='path to json file to validate'
%0A
+)%0A
-sys.exit(1)%0A main(sys.argv%5B1%5D, sys.argv%5B2%5D
+args = parser.parse_args()%0A main(args.json_file, args.schema
)%0A
|
72fd62dca8e87e67833698dd7e38b879c54d6e27 | use env. variable for sender | api/mail.py | api/mail.py | from flask import Flask, render_template
from api import app
from api.models import User, Invite, Reset, ComponentUser
from flask_mail import Mail
from flask_mail import Message
import os
app.config.update(
MAIL_SERVER = str(os.environ['MAIL_SERVER']),
MAIL_PORT = str(os.environ['MAIL_PORT']),
MAIL_USE_SSL = str(os.environ['MAIL_USE_SSL']),
MAIL_USERNAME = str(os.environ['MAIL_USERNAME']),
MAIL_PASSWORD = str(os.environ['MAIL_PASSWORD']),
)
mail = Mail(app)
def send_email(subject, sender, recipients, html_body):
"""
Sends email of given subject, sender, recipients (array) and html template.
"""
msg = Message(subject=subject, sender=sender, recipients=recipients)
msg.html = html_body
mail.send(msg)
def send_email_register(sender,email):
"""
User invitation email.
"""
recipient = email[0]
username = recipient.split('@')[0]
admin = sender.split('@')[0]
new = Invite.query.filter_by(email = recipient).order_by('-id').first()
url = str(os.environ['SERVER_ADDRESS']) + '/api/auth/setpassword?token=' + str(new.token)
subject = "Cross-apps registration"
headerText = "You've received an invitation!"
freeText = "Administrator has invited you to join Cross-apps shortcuts!"
    userTextBold = "You can complete your registration by clicking the button or entering the link. \n Set up your unique password and make yourself at home!"
userText = ""
send_email(subject,
'cross-apps@yandex.com',
email,
render_template("email-template.html",
user=username,
sender=admin,
url=url,
subject=subject,
buttonText="Register",
headerText=headerText,
freeText=freeText,
userTextBold=userTextBold,
userText=userText))
def send_email_reset(email):
"""
User password reset email.
"""
recipient = email[0]
username = recipient.split('@')[0]
new = Reset.query.filter_by(email = recipient).order_by('-id').first()
url = str(os.environ['SERVER_ADDRESS']) + '/api/auth/setnewpassword?token=' + str(new.token)
subject = "Cross-apps password reset"
headerText = "Looks like you want to reset your password!"
freeText = "Here we send you instructions to set up a new password for your account!"
    userTextBold = "Please proceed by clicking the button. \n You will be shown a page that will allow you to set a new password."
userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging."
send_email(subject,
'cross-apps@yandex.com',
email,
render_template("email-template.html",
user=username,
sender="system",
url=url,
subject=subject,
buttonText="RESET",
headerText=headerText,
freeText=freeText,
userTextBold=userTextBold,
userText=userText))
def send_email_token(email):
"""
Sending requested token.
"""
recipient = email[0]
username = recipient.split('@')[0]
new = ComponentUser.query.filter_by(email = recipient).order_by('-id').first()
subject = "Cross-apps token delivery!"
headerText = "You've received a Cross-apps token!"
freeText = ""
    userTextBold = "Here is your unique token for Cross-app links. \n Token allows you to set your own view order \n and pin your favourite apps to the navbar."
userText = str(new.token)
send_email(subject,
'cross-apps@yandex.com',
email,
render_template("email-template.html",
user=username,
sender="system",
url=str(os.environ['SERVER_ADDRESS']),
subject=subject,
buttonText="Visit our website",
headerText=headerText,
freeText=freeText,
userTextBold=userTextBold,
userText=userText)) | Python | 0.999982 | @@ -1476,39 +1476,48 @@
-'cross-apps@yandex.com'
+str(os.environ%5B'MAIL_USERNAME'%5D)
,%0A
@@ -2851,39 +2851,48 @@
-'cross-apps@yandex.com'
+str(os.environ%5B'MAIL_USERNAME'%5D)
,%0A
@@ -3788,31 +3788,40 @@
-'cross-apps@yandex.com'
+str(os.environ%5B'MAIL_USERNAME'%5D)
,%0A
|
3de4e87af5502feb1186cb1a11b56df018ae6e19 | Fix comment typo | bears/python/requirements/PySafetyBear.py | bears/python/requirements/PySafetyBear.py | import os
from collections import namedtuple
import pkg_resources
import re
from safety import safety
from coalib.bears.LocalBear import LocalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.results.Result import Result
from coalib.settings.Setting import path
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import typed_list
# This was for old versions of safety; those versions will still be allowed in the future.
def cve_key_checker(vulnerability): # pragma: no cover
if 'cve' in vulnerability.data:
if vulnerability.data['cve'] is None:
return None
else:
return True
else:
return None
# the safety module expects an object that looks like this
# (not importing it from there because it's in a private-ish location)
Package = namedtuple('Package', ('key', 'version'))
safety_get_vulnerabilities = safety.get_vulnerabilities
_insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'
'safety-db/master/data/insecure_full.json')
_insecure_json_url = ('https://raw.githubusercontent.com/'
'pyupio/safety-db/master/data/insecure.json')
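# Wrap safety's get_vulnerabilities so every entry carries a usable id:
# some database entries have no CVE number, so fall back to pyup's own id
# and then mirror the resulting value back into the 'id' field.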
def _get_vulnerabilities(pkg, spec, db):
for entry in safety_get_vulnerabilities(pkg, spec, db):
entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']
entry['id'] = entry['cve']
yield entry
safety.get_vulnerabilities = _get_vulnerabilities
class PySafetyBear(LocalBear):
"""
Checks if any of your Python dependencies have known security issues.
Data is taken from pyup.io's vulnerability database hosted at
https://github.com/pyupio/safety.
"""
LANGUAGES = {
'Python Requirements',
'Python 2 Requirements',
'Python 3 Requirements',
}
AUTHORS = {'Bence Nagy'}
REQUIREMENTS = {PipRequirement('safety', '1.8.2')}
AUTHORS_EMAILS = {'bence@underyx.me'}
LICENSE = 'AGPL'
CAN_DETECT = {'Security'}
def setup_dependencies(self):
file = self.download_cached_file(_insecure_full_json_url,
'insecure_full.json')
self.download_cached_file(_insecure_json_url,
'insecure.json')
type(self).db_path = os.path.dirname(file)
def run(self, filename, file,
db_path: path = '',
cve_ignore: typed_list(str) = []):
"""
Checks for vulnerable package versions in requirements files.
:param db_path: Path to a local vulnerability database.
:param cve_ignore: A list of CVE number to be ignore.
"""
db_path = self.db_path if not db_path else db_path
packages = list(
Package(key=req.key, version=req.specs[0][1])
for req in self.try_parse_requirements(file)
if len(req.specs) == 1 and req.specs[0][0] == '=='
)
if not packages:
return
for vulnerability in safety.check(packages, key=None,
db_mirror=db_path, cached=False,
ignore_ids=cve_ignore):
if 'cve' in vulnerability.vuln_id.strip().lower():
message_template = (
'{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '
'and your project is using {vuln.version}.'
)
else:
message_template = (
'{vuln.name}{vuln.spec} is vulnerable to '
'pyup.io-{vuln.vuln_id} and your project is using '
'{vuln.version}.'
)
# StopIteration should not ever happen so skipping its branch
line_number, line = next( # pragma: no branch
(index, line) for index, line in enumerate(file, start=1)
if vulnerability.name in line
)
version_spec_match = re.search(r'[=<>]+(\S+?)(?:$|\s|#)', line)
source_range = SourceRange.from_values(
filename,
line_number,
version_spec_match.start(1) + 1,
line_number,
version_spec_match.end(1) + 1,
)
yield Result(
self,
message_template.format(vuln=vulnerability),
additional_info=vulnerability.advisory,
affected_code=(source_range, ),
)
@staticmethod
def try_parse_requirements(lines: typed_list(str)):
"""
        Yields all package requirements parsable from the given lines.
:param lines: An iterable of lines from a requirements file.
"""
for line in lines:
try:
yield from pkg_resources.parse_requirements(line)
except pkg_resources.RequirementParseError:
# unsupported requirement specification
pass
| Python | 0 | @@ -4684,17 +4684,16 @@
nts pars
-e
able fro
|
5ecd20d86a0fe2586cbac4daadd34bb13443f94d | set central prototype executable | central/CentralProto.py | central/CentralProto.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from app.nrf24 import NRF24
from app.cipher import XTEA
from app.message import MessageType
# RF Communication constants
NETWORK = 0xC05A
SERVER_ID = 0x01
# Hardware constants
CE_PIN = 25
# Timing constants
PERIOD_REFRESH_KEY_SECS = 120.0
CODE = '123456'
#TODO refactor all conversion methods into a common place
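# Byte-level helpers: RF payloads are lists of 8-bit values, so these pack
# and unpack big-endian 16-bit and 32-bit integers to and from such lists.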
def byte(val):
return val & 0xFF
def to_int(val):
return (byte(val[0]) << 8) + byte(val[1])
def to_long(val):
return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3])
def from_long(val):
return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte (val)]
def convert_key(key):
key2 = []
for i in key:
key2 += from_long(i)
return key2
class Device:
def __init__(self):
self.cipher = XTEA()
self.latest_ping = time.time()
self.latest_voltage_level = None
self.next_key_time = 0
# List of all devices and their encoding keys
keys = {}
# Current alarm status
locked = True
if __name__ == '__main__':
print "Alarm System Central Prototype..."
nrf = NRF24(NETWORK, SERVER_ID)
print "NRF24 instance created..."
nrf.begin(0, 0, CE_PIN)
print "NRF24 instance started..."
while True:
        # Wait forever for remote module calls
#FIXME we should limit the timeout in order to frequently check that all known devices
# are pinging as expected...
payload = nrf.recv()
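        # recv() blocks here until a packet arrives (see the FIXME above about
        # bounding this wait with a timeout)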
        now = time.time()  # keep the same clock as Device.latest_ping
# Have we received something?
if payload:
# Yes, find the originating device and port (message type)
device_id = payload.device
port = payload.port
content = payload.content
# Add the device if first time
device = keys.get(device_id)
if not device:
device = Device()
keys[device_id] = device
print "Source %02X, port %02X" % (device, port)
# Manage received message based on its type (port)
if port == MessageType.PING_SERVER:
device.latest_ping = now
payload = [locked]
# Check if need to generate and send new cipher key
if now >= device.next_key_time:
key = XTEA.generate_key()
device.cipher.set_key(key)
device.next_key_time = now + PERIOD_REFRESH_KEY_SECS
payload = [locked]
payload += convert_key(key)
nrf.send(device_id, port, payload)
elif port == MessageType.VOLTAGE_LEVEL:
device.latest_voltage_level = to_int(content)
print "Source %02X, voltage = %d mV" % (device, device.latest_voltage_level)
elif port in [MessageType.LOCK_CODE, MessageType.UNLOCK_CODE]:
#TODO decipher
code = device.cipher.decipher([to_long(content[0:4]), to_long(content[4:8])])
code = from_long(code[0]) + from_long(code[1])
print "Source %02X, code = %s" % (device, code)
#TODO convert to string and compare to CODE
# Send current lock status
nrf.send(device_id, port, [locked])
else:
print "Source %02X, unknown port %02X!" % (device, port)
| Python | 0.000001 | |
e6cb1617e588d6b276fe01c401f2c1b34cf88d5f | fix stuff | api/read.py | api/read.py | import datetime
from django.http import JsonResponse
from dateutil.parser import parse
from django.contrib.auth.decorators import login_required
from api.models import ( Applicant, Client, Disabilities, EmploymentEducation,
Enrollment, HealthAndDV, IncomeBenefits, Services )
def get_applicants(request):
applicant = {}
return JsonResponse(applicant)
def search_clients(request):
'''
request.POST =
query
'''
clients = Client.objects.all()
if 'query' in request.POST:
q = request.POST['query']
if q.isdigit():
clients = clients.filter(uuid=q)
else:
clients = clients.filter(last_name__contains=q)
return JsonResponse([{
"first_name": c.first_name,
"middle_name": c.middle_name,
"last_name": c.last_name,
"social_security": c.social_security,
"date_of_birth": datetime.datetime.strftime(c.date_of_birth, '%m/%d/%Y'),
"ethnicity": 1,
"gender": 1,
"veteran": 1,
"year_entered": c.year_entered,
"year_exited": c.year_exited,
"date_created": c.date_created
} for c in clients], safe=False)
def get_applicants(request):
app_list = Applicant.objects.all()
applicant = [{
"id": c.id,
"first_name": c.first_name,
"last_name": c.last_name,
"why": c.why,
"phone": c.phone,
"email": c.emial,
"address": c.address,
"birthday": c.birthday,
"ethnicity": value_maps.ethnicity[c.ethnicity],
"gender": value_maps.gender[c.gender],
"veteran": value_maps.veteran[c.veteran],
"family": c.family,
"domestic_violence": value_maps.domestic_violence[c.domestic_violence],
"pregnancy": c.pregnancy,
"drug": c.drug,
"urgency": c.urgency,
"created": c.created,
"reviewed": c.reviewed,
} for c in app_list]
return JsonResponse(applicant, safe=False)
| Python | 0.000002 | @@ -1181,33 +1181,8 @@
lse)
-%0A%3C%3C%3C%3C%3C%3C%3C Updated upstream
%0A%0Ade
|
ae7b583cab8d38b04ce57571f50221b4a2e429f6 | Update base.py | webhook/base.py | webhook/base.py | """
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data):
"""
Unimplemented method
"""
raise NotImplementedError
| Python | 0.000001 | @@ -705,16 +705,21 @@
lf, data
+=None
):%0A
|
46b860e93d8a9e8dda3499b7306e30ebcd0e0174 | handle session stopped | webnotes/app.py | webnotes/app.py | import sys, os
import json
sys.path.insert(0, '.')
sys.path.insert(0, 'app')
sys.path.insert(0, 'lib')
from werkzeug.wrappers import Request, Response
from werkzeug.local import LocalManager
from webnotes.middlewares import StaticDataMiddleware
from werkzeug.exceptions import HTTPException
from werkzeug.contrib.profiler import ProfilerMiddleware
from webnotes import get_config
import mimetypes
import webnotes
import webnotes.handler
import webnotes.auth
import webnotes.webutils
local_manager = LocalManager([webnotes.local])
@Request.application
def application(request):
webnotes.local.request = request
try:
site = webnotes.utils.get_site_name(request.host)
webnotes.init(site=site)
webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in (request.form or request.args).iteritems() })
webnotes.local._response = Response()
try:
webnotes.http_request = webnotes.auth.HTTPRequest()
except webnotes.AuthenticationError, e:
pass
if webnotes.form_dict.cmd:
webnotes.handler.handle()
else:
webnotes.webutils.render(webnotes.request.path[1:])
except HTTPException, e:
return e
finally:
if webnotes.conn:
webnotes.conn.close()
return webnotes._response
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False):
webnotes.validate_versions()
global application
from werkzeug.serving import run_simple
if profile:
application = ProfilerMiddleware(application)
application = StaticDataMiddleware(application, {
'/': 'public',
})
run_simple('0.0.0.0', int(port), application, use_reloader=True,
use_debugger=True, use_evalex=True)
| Python | 0 | @@ -528,16 +528,438 @@
ocal%5D)%0A%0A
+def handle_session_stopped():%0A%09res = Response(%22%22%22%3Chtml%3E%0A%09%09%09%09%09%09%09%3Cbody style=%22background-color: #EEE;%22%3E%0A%09%09%09%09%09%09%09%09%09%3Ch3 style=%22width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto%22%3E%0A%09%09%09%09%09%09%09%09%09%09%09Updating.%0A%09%09%09%09%09%09%09%09%09%09%09We will be back in a few moments...%0A%09%09%09%09%09%09%09%09%09%3C/h3%3E%0A%09%09%09%09%09%09%09%3C/body%3E%0A%09%09%09%09%09%3C/html%3E%22%22%22)%0A%09res.status_code = 503%0A%09res.content_type = 'text/html'%0A%09return res%0A%0A
@Request
@@ -1599,16 +1599,109 @@
rn e%0A%09%09%0A
+%09except webnotes.SessionStopped, e:%0A%09%09webnotes.local._response = handle_session_stopped()%0A%09%09%0A
%09finally
@@ -1766,16 +1766,22 @@
ebnotes.
+local.
_respons
|
5344220edcf1f22391a3ba0452e92d79f55a85a7 | add method wait() to block until every backends answered | weboob/bcall.py | weboob/bcall.py | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon, Christophe Benz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
from copy import copy
import logging
from logging import debug
from threading import Thread, Event, RLock, Timer
from .tools.misc import get_backtrace
__all__ = ['BackendsCall', 'CallErrors']
class CallErrors(Exception):
def __init__(self, errors):
Exception.__init__(self, u'These errors have been raised in backend threads '\
'(use --debug option to print backtraces):\n%s' % (
u'\n'.join((u' * %s: %s%s' % (backend, error, backtrace + '\n'
if logging.root.level == logging.DEBUG else ''))
for backend, error, backtrace in errors)))
self.errors = copy(errors)
def __iter__(self):
return self.errors.__iter__()
class BackendsCall(object):
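    """
    Run a method on several backends in parallel and collect their results.

    Hypothetical usage sketch (the method name is assumed for illustration):

        call = BackendsCall(backends, 'iter_something')
        for backend, result in call:
            process(backend, result)
    """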
def __init__(self, backends, function, *args, **kwargs):
"""
@param backends list of backends to call
@param function backends' method name, or callable object
@param args, kwargs arguments given to called functions
"""
# Store if a backend is finished
self.backends = {}
for backend in backends:
self.backends[backend.name] = False
# Global mutex on object
self.mutex = RLock()
        # Event set when every backend has given its data
self.finish_event = Event()
# Event set when there are new responses
self.response_event = Event()
# Waiting responses
self.responses = []
# Errors
self.errors = []
# Threads
self.threads = []
# Create jobs for each backend
with self.mutex:
for backend in backends:
debug('Creating a new thread for %s' % backend)
                thread = Timer(0, self._caller, (backend, function, args, kwargs))
                self.threads.append(thread)
                thread.start()
if not backends:
self.finish_event.set()
def _store_error(self, backend, error):
with self.mutex:
backtrace = get_backtrace(error)
self.errors.append((backend, error, backtrace))
def _store_result(self, backend, result):
with self.mutex:
self.responses.append((backend, result))
self.response_event.set()
def _caller(self, backend, function, args, kwargs):
debug('%s: Thread created successfully' % backend)
with backend:
try:
# Call method on backend
try:
debug('%s: Calling function %s' % (backend, function))
if callable(function):
result = function(backend, *args, **kwargs)
else:
result = getattr(backend, function)(*args, **kwargs)
except Exception, error:
self._store_error(backend, error)
else:
debug('%s: Called function %s returned: "%s"' % (backend, function, result))
if hasattr(result, '__iter__'):
# Loop on iterator
try:
for subresult in result:
# Lock mutex only in loop in case the iterator is slow
# (for example if backend do some parsing operations)
self._store_result(backend, subresult)
except Exception, error:
self._store_error(backend, error)
else:
self._store_result(backend, result)
finally:
with self.mutex:
# This backend is now finished
self.backends[backend.name] = True
for finished in self.backends.itervalues():
if not finished:
return
self.finish_event.set()
self.response_event.set()
def _callback_thread_run(self, callback, errback):
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
callback(*responses.pop(0))
if errback:
with self.mutex:
while self.errors:
errback(*self.errors.pop(0))
callback(None, None)
def callback_thread(self, callback, errback=None):
"""
        Call this method to create a thread which will call back a
        specified function every time a new result comes.
When the process is over, the function will be called with
both arguments set to None.
The functions prototypes:
def callback(backend, result)
def errback(backend, error)
"""
thread = Thread(target=self._callback_thread_run, args=(callback, errback))
thread.start()
return thread
def __iter__(self):
# Don't know how to factorize with _callback_thread_run
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
yield responses.pop(0)
# Raise errors
with self.mutex:
if self.errors:
raise CallErrors(self.errors)
| Python | 0 | @@ -5959,16 +5959,170 @@
thread%0A%0A
+ def wait(self):%0A self.finish_event.wait()%0A%0A with self.mutex:%0A if self.errors:%0A raise CallErrors(self.errors)%0A%0A
def
|
e38fa3f55b0e60a1d6c7fa0cf194e6f3bd4b899d | add histogram util | corehq/util/datadog/gauges.py | corehq/util/datadog/gauges.py | from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
"""
helper for easily registering datadog gauges to run periodically
To update a datadog gauge on a schedule based on the result of a function
just add to your app's tasks.py:
my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
run_every=crontab(minute=0))
"""
_enforce_prefix(name, enforce_prefix)
datadog_gauge = _DatadogGauge(name, fn, run_every)
return datadog_gauge.periodic_task()
def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
_datadog_record(statsd.gauge, name, value, enforce_prefix, tags)
def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
_datadog_record(statsd.increment, name, value, enforce_prefix, tags)
def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
_enforce_prefix(name, enforce_prefix)
try:
fn(name, value, tags=tags)
except Exception:
datadog_logger.exception('Unable to record Datadog stats')
class _DatadogGauge(object):
def __init__(self, name, fn, run_every):
self.name = name
self.fn = fn
self.run_every = run_every
def periodic_task(self):
@periodic_task('background_queue', run_every=self.run_every,
acks_late=True, ignore_result=True)
@wraps(self.fn)
def inner(*args, **kwargs):
statsd.gauge(self.name, self.fn(*args, **kwargs))
return inner
def _enforce_prefix(name, prefix):
soft_assert(fail_if_debug=True).call(
not prefix or name.split('.')[0] == prefix,
"Did you mean to call your gauge 'commcare.{}'? "
"If you're sure you want to forgo the prefix, you can "
"pass enforce_prefix=None".format(name))
| Python | 0.000786 | @@ -739,24 +739,332 @@
ic_task()%0A%0A%0A
+def datadog_histogram(name, value, enforce_prefix='commcare', tags=None):%0A %22%22%22%0A Usage: Used to track the statistical distribution of a set of values over a statsd flush period.%0A Actually submits as multiple metrics:%0A %22%22%22%0A _datadog_record(statsd.histogram, name, value, enforce_prefix, tags)%0A%0A%0A
def datadog_
|
3643f0ce1b7ea7982e8081ae29e726c73471cc4b | update description | vcspull/__about__.py | vcspull/__about__.py | __title__ = 'vcspull'
__package_name__ = 'vcspull'
__description__ = 'vcs project manager'
__version__ = '1.0.0'
__author__ = 'Tony Narlock'
__email__ = 'tony@git-pull.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2013-2016 Tony Narlock'
| Python | 0.000001 | @@ -67,27 +67,30 @@
= '
-vcs project manager
+synchronize your repos
'%0A__
|
42561d709a2ecfee71103dfbb55116cec1128b71 | fix redirect after upload | website/apps/home/views/UploadView.py | website/apps/home/views/UploadView.py | #!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.views.generic.base import TemplateView
from website.apps.home.utils import load_simulation_file
logger = logging.getLogger(__name__)
class UploadView(TemplateView):
template_name = "../templates/simulation/upload.html"
@transaction.atomic
def post(self, request, *args, **kwargs):
if request.method == 'POST':
if not request.FILES['output_file']:
return HttpResponseBadRequest("No 'output_file' is provided")
else:
sim_name = self.request.POST.get(u"name", None)
is_historical = self.request.POST.get("historical")
load_simulation_file(request.FILES['output_file'], simulation_name=sim_name, is_historical=is_historical)
return HttpResponseRedirect(reverse('home.display_simulations'))
else:
return HttpResponseRedirect("")
| Python | 0 | @@ -1439,69 +1439,309 @@
-return HttpResponseRedirect(reverse('home.display_simulations
+# Redirect to appropriate page whether uploading simulation or historical%0A if is_historical!='on':%0A return HttpResponseRedirect(reverse('home.display_simulations'))%0A else:%0A return HttpResponseRedirect(reverse('home.display_historical
'))%0A
|
c9a915692b30458717ead2f83fce77ce295e5ed9 | add recipe_folder member (#10527) | conans/pylint_plugin.py | conans/pylint_plugin.py | """Pylint plugin for ConanFile"""
import astroid
from astroid import MANAGER
def register(linter):
"""Declare package as plugin
This function needs to be declared so astroid treats
current file as a plugin.
"""
pass
def transform_conanfile(node):
"""Transform definition of ConanFile class so dynamic fields are visible to pylint"""
str_class = astroid.builtin_lookup("str")
info_class = MANAGER.ast_from_module_name("conans.model.info").lookup(
"ConanInfo")
build_requires_class = MANAGER.ast_from_module_name(
"conans.client.graph.graph_manager").lookup("_RecipeBuildRequires")
file_copier_class = MANAGER.ast_from_module_name(
"conans.client.file_copier").lookup("FileCopier")
file_importer_class = MANAGER.ast_from_module_name(
"conans.client.importer").lookup("_FileImporter")
python_requires_class = MANAGER.ast_from_module_name(
"conans.client.graph.python_requires").lookup("PyRequires")
dynamic_fields = {
"conan_data": str_class,
"build_requires": build_requires_class,
"info_build": info_class,
"info": info_class,
"copy": file_copier_class,
"copy_deps": file_importer_class,
"python_requires": [str_class, python_requires_class],
}
for f, t in dynamic_fields.items():
node.locals[f] = [t]
MANAGER.register_transform(
astroid.ClassDef, transform_conanfile,
lambda node: node.qname() == "conans.model.conan_file.ConanFile")
def _python_requires_member():
return astroid.parse("""
from conans.client.graph.python_requires import ConanPythonRequire
python_requires = ConanPythonRequire()
""")
astroid.register_module_extender(astroid.MANAGER, "conans", _python_requires_member)
| Python | 0 | @@ -1290,16 +1290,52 @@
class%5D,%0A
+ %22recipe_folder%22: str_class,%0A
%7D%0A%0A
|
4b5ae262bab0bc0c83555d39400049f20aaca9cd | Add CONVERSATION_LABEL_MAX_LENGTH constant | chatterbot/constants.py | chatterbot/constants.py | """
ChatterBot constants
"""
'''
The maximum length of characters that the text of a statement can contain.
This should be enforced on a per-model basis by the data model for each
storage adapter.
'''
STATEMENT_TEXT_MAX_LENGTH = 400
# The maximum length of characters that the name of a tag can contain
TAG_NAME_MAX_LENGTH = 50
DEFAULT_DJANGO_APP_NAME = 'django_chatterbot'
| Python | 0.999974 | @@ -228,16 +228,253 @@
= 400%0A%0A
+'''%0AThe maximum length of characters that the text label of a conversation can contain.%0AThe number 32 was chosen because that is the length of the string representation%0Aof a UUID4 with no hyphens.%0A'''%0ACONVERSATION_LABEL_MAX_LENGTH = 32%0A%0A
# The ma
|
7a1e57fa5c6d2c6330a73e8fab95c5ef6fa0ea35 | Fix indentation | tomviz/python/SetNegativeVoxelsToZero.py | tomviz/python/SetNegativeVoxelsToZero.py | def transform_scalars(dataset):
"""Set negative voxels to zero"""
from tomviz import utils
import numpy as np
data = utils.get_array(dataset)
data[data<0] = 0 #set negative voxels to zero
# set the result as the new scalars.
utils.set_array(dataset, data)
| Python | 0.017244 | @@ -63,20 +63,16 @@
zero%22%22%22%0A
-
%0A fro
@@ -204,18 +204,16 @@
to zero%0A
-
%0A # s
@@ -280,13 +280,8 @@
t, data)
-%0A
|
3d285c492aa267d2c27fad32a25546fc3db0c04d | Update protocol.py | gu/protocol.py | gu/protocol.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" description
Redesign this API with reference to twisted/asyncio.
"""
__author__ = 'wangfei'
__date__ = '2015/03/06'
import gevent.monkey
gevent.monkey.patch_socket()
import logging
import gevent
import gevent.queue
import gevent.pywsgi
import geventwebsocket.handler
from .util import shorten
logger = logging.getLogger(__name__)
def id_generator():
i = 0
wall = 1 << 31
while True:
i += 1
if i > wall:
i = 1
yield i
class ProtocolException(Exception):
pass
class Protocol(object):
""" Server / Client Handler
"""
id_generator = id_generator()
read_deadline = 0
recv_buf_size = 256
def __init__(self, *args, **kwargs):
self.sock = args[0]
self.address = args[1]
        self.sendq = gevent.queue.Queue() # outgoing message queue
self.recv_buf_size = kwargs.get('recv_buf_size', self.recv_buf_size)
        self.read_deadline = kwargs.get('read_deadline', self.read_deadline) # heartbeat timeout: 0 means never time out
self.session_id = self.id_generator.next()
self.sender_glet = gevent.spawn(self.loop_sending)
self.receiver_glet = gevent.spawn(self.loop_recving)
self.connection_made()
def connection_made(self):
logger.info('connection made')
def data_received(self, data):
logger.debug('data received: %s', shorten(data, 32))
def connection_lost(self, reason):
logger.info('connection lost: %s', reason)
self.close_protocol()
def send_data(self, data):
"""异步发送"""
logger.debug('send data: %s', shorten(data, 32))
self.sendq.put(data)
def send_lose(self, data):
"""发送消息然后断开"""
self.send_rest()
try:
self.sock.sendall(data)
except:
logger.warn('send lose except', exc_info=1)
self.close_protocol()
def send_rest(self):
"""把sendq队列里剩余的发完"""
while not self.sendq.empty():
data = self.sendq.get()
try:
self.sock.sendall(data)
except:
logger.warn('send one except', exc_info=1)
self.close_protocol()
break
def loop_recving(self):
reason = ''
while True:
try:
if self.read_deadline is not 0:
with gevent.Timeout(self.read_deadline, ProtocolException('msg timeout')):
data = self.sock.recv(self.recv_buf_size)
else:
data = self.sock.recv(self.recv_buf_size)
except Exception as e:
self.sock = None
if isinstance(e, ProtocolException):
reason = 'msg timeout'
else:
reason = 'loop recving except'
logger.warn('loop recving except', exc_info=1)
break
if not data:
reason = 'loop recving none data'
break
self.data_received(data)
self.connection_lost(reason)
def loop_sending(self):
reason = ''
while True:
data = self.sendq.get()
try:
self.sock.sendall(data)
except:
logger.warn('loop sending except', exc_info=1)
reason = 'loop sending except'
break
self.connection_lost(reason)
def close_protocol(self):
try:
self.sender_glet.kill()
except:
logger.info('greenlet sender kill except')
if self.sock:
self.sock.close()
self.sock = None
try:
self.receiver_glet.kill()
except:
logger.info('greenlet receiver kill except')
class HookLogWSGIHandler(gevent.pywsgi.WSGIHandler):
""" hook gevent.pywsgi.WSGIHandler
>>> from gevent.pywsgi import WSGIServer
>>> server = WSGIServer(('127.0.0.1', 6000), app, handler_class=HookLogWSGIHandler)
>>> server.serve_forever()
"""
def log_request(self):
logger.debug(self.format_request())
def format_request(self):
length = self.response_length or '-'
if self.time_finish:
delta = '%.6f' % (self.time_finish - self.time_start)
else:
delta = '-'
        # MARK: behind an nginx reverse proxy, client_address must be adjusted per the nginx config to be the real IP
client_address = self.client_address[0] if isinstance(self.client_address, tuple) else self.client_address
return '%s - - "%s" %s %s %s' % (
client_address or '-',
getattr(self, 'requestline', ''),
(getattr(self, 'status', None) or '000').split()[0],
length,
delta)
class HookLogWSHandler(geventwebsocket.handler.WebSocketHandler):
""" hook geventwebsocket.handler.WebSocketHandler(支持 websocket 的 wsgi handler)
>>> from gevent.pywsgi import WSGIServer
>>> server = WSGIServer(('127.0.0.1', 6000), app, handler_class=HookLogWSHandler)
>>> server.serve_forever()
"""
def log_request(self):
logger.debug(self.format_request())
def format_request(self):
length = self.response_length or '-'
if self.time_finish:
delta = '%.6f' % (self.time_finish - self.time_start)
else:
delta = '-'
        # MARK: behind an nginx reverse proxy, client_address must be adjusted per the nginx config to be the real IP
client_address = self.client_address[0] if isinstance(self.client_address, tuple) else self.client_address
return '%s - - "%s" %s %s %s' % (
client_address or '-',
getattr(self, 'requestline', ''),
(getattr(self, 'status', None) or '000').split()[0],
length,
delta)
| Python | 0.000001 | @@ -1,26 +1,4 @@
-#!/usr/bin/env python%0A
# -*
|
e504ef393f9f11d243fed88b2e4acc1566ea912c | Delete unread messages | scripts/read.py | scripts/read.py | import time
import cache
import vkapi
from log import datetime_format
def main(a, args):
dialogs = a.messages.getDialogs(unread=1)['items']
messages = {}
users = []
chats = []
for msg in dialogs:
def cb(req, resp):
messages[req['peer_id']] = resp['items'][::-1]
a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb)
if 'chat_id' in msg['message']:
chats.append(msg['message']['chat_id'])
else:
users.append(msg['message']['user_id'])
uc = cache.UserCache(a, 'online')
cc = cache.ConfCache(a)
uc.load(users)
cc.load(chats)
a.sync()
if dialogs:
print('-------------------------\n')
else:
print('Nothing here')
for msg in dialogs:
m = msg['message']
if 'chat_id' in m:
print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread']))
else:
print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'],
', online' if uc[m['user_id']]['online'] else '', msg['unread']))
print()
for i in messages[vkapi.utils.getSender(msg['message'])]:
print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body']))
print()
print('-------------------------\n')
if args:
print(flush=True)
mr = vkapi.MessageReceiver(a)
while True:
time.sleep(1)
for m in mr.getMessages():
if 'chat_id' in m:
print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'],
uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name']))
else:
print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id']))
print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body']))
print(flush=True)
| Python | 0.000015 | @@ -701,16 +701,30 @@
.sync()%0A
+ mids = %5B%5D%0A
if d
@@ -723,32 +723,32 @@
if dialogs:%0A
-
print('-
@@ -1416,32 +1416,104 @@
print()%0A
+ if 'chat_id' not in m:%0A mids.append(i%5B'id'%5D)%0A
print('-
@@ -1549,16 +1549,23 @@
%0A if
+'t' in
args:%0A
@@ -2216,32 +2216,32 @@
)), m%5B'body'%5D))%0A
-
@@ -2238,28 +2238,179 @@
print(flush=True)%0A
+ elif 'd' in args and mids:%0A print('Deleting %7B%7D messages'.format(len(mids)))%0A a.messages.delete(message_ids=','.join(map(str, mids)))%0A
|
46e2997cb51e45dc58f5a97cea6642ba64d03188 | Fix 9.0 version | purchase_all_shipments/__openerp__.py | purchase_all_shipments/__openerp__.py | # Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{'name': 'Purchase All Shipments',
'version': '8.0.1.0.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Purchases',
'license': 'AGPL-3',
'depends': ['purchase'],
'data': ['view/purchase_order.xml'],
}
| Python | 0 | @@ -796,9 +796,9 @@
': '
-8
+9
.0.1
|
05bfbd0dbe50866a2c26a4018d4a2786af9b06fe | Fix decoding when the object is a bytestring. | template_help/templatetags/template_help.py | template_help/templatetags/template_help.py | import datetime
from decimal import Decimal
from django.http import HttpRequest
from django import template
from django.template.defaultfilters import mark_safe, force_escape, pluralize
from django.db.models.query import QuerySet
from django.forms import BaseForm, Field
from django.db.models import Model
from django.core import urlresolvers
from django.core.paginator import Paginator, Page
register = template.Library()
def is_iterable(x):
"A implementation independent way of checking for iterables"
try:
iter(x)
return True
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
return False
@register.tag
def context_help(parser, token):
"""
Renders a table listing each item in the context, along with a synopsis of the value.
e.g. Context({'users':User.objects.all()}) ->
<table>
<tr><td>users</td><td>Queryset of XXX <a href="/admin/doc/models/auth.user/">auth.user</a></td></tr>
</table>
The idea is that a Django dev can write views up to a context, and provide a stub template to a designer;
the designer could then use the help provided by this tag to work with the given context.
Normally, lists rendered as just counts w/ the 0th item taken as an examplar.
Tuples are listed out, unless they are particularly long, in which case an exemplar is shown.
Dictionaries are always listed out. Doubly-nested dicts are not shown.
(If you're nesting context that much, you're probably doing something silly.)
Too bad forms aren't registered in Admin docs, but at least the fields are listed out here.
"""
return ContextHelpNode()
class ContextHelpNode(template.Node):
def render_explanation(self, o):
if isinstance(o, HttpRequest):
return "<a href='http://docs.djangoproject.com/en/dev/ref/request-response/#ref-request-response'>request object</a>"
elif isinstance(o, (QuerySet,Model)):
if isinstance(o, QuerySet):
prefix = "Queryset of "
o = o.model
else:
m = o
#link to model docs
app_label, model_name = o._meta.app_label, o._meta.object_name.lower()
url = urlresolvers.reverse('django-admindocs-models-detail', args=[app_label, model_name])
return "<a href='%s'>%s</a>" % (force_escape(url),
force_escape("%s.%s" % (app_label, model_name)))
elif isinstance(o, BaseForm):
return "<p>%s fields:</p>\n<ul>%s</ul>" % (
o.__class__.__name__,
"\n".join(["<li>%s</li>" % force_escape(field) for field in o.fields])
)
elif isinstance(o, (set, list, tuple, dict)):
return "group of %s items" % len(o)
elif isinstance(o, (basestring, int, Decimal, float, datetime.date, datetime.time, datetime.datetime)):
return force_escape(unicode(o))
else:
type_ = type(o)
str_ = unicode(o)
return force_escape("%s: %s" % (type_, str_))
def render_row(self, results, label, explanation):
results.append("<tr><td class='label'>%s</td><td class='explanation'>%s</td></tr>" % (force_escape(label),explanation))
def render_item(self, results, label, o):
if isinstance(o, BaseForm):
self.render_row(results, label, self.render_explanation(o))
elif isinstance(o, tuple):
if len(o) < 10:
if len(o) == 0:
self.render_row(results, label, "Empty tuple")
return ""
self.render_row(results, label, force_escape("tuple %s:%s") + ",".join([(i,self.render_explanation(val))
for (i,val) in enumerate(o)]))
else:
self.render_row(results, label, "Long tuple-- %s items -- e.g. %s.0=>%s " % (len(o), force_escape(label), self.render_explanation(o[0])))
elif isinstance(o, (set, list, QuerySet)) or (is_iterable(o) and not isinstance(o, basestring)):
if isinstance(o, set):
seq_type = "Set"
elif isinstance(o, list):
seq_type = "List"
elif isinstance(o, QuerySet):
seq_type = "Queryset"
else:
seq_type = "Sequence (%s)" % (o,)
try:
o_l = len(o)
except TypeError:
o_l = "<Unknown>"
if o_l == 0:
self.render_row(results, label, force_escape("Empty %s" % seq_type))
return
o = iter(o).next()
self.render_row(results, label, force_escape("%s of %s " % (seq_type, o_l)) + (
self.render_explanation(o)))
else:
self.render_row(results, label, self.render_explanation(o))
def render(self, context):
#flatten context into a standard dict.
if isinstance(context, dict):
d = context
else:
d = {}
for inner_d in reversed(context.dicts):
d.update(inner_d)
results = ["<table class='context_help'>"]
if not d:
return "<p>Empty context</p>"
for k,v in sorted(d.items(), key=lambda t: t[0].lower()):
if isinstance(v,dict):
results.append("<tr><td class='label'>%s</td><td class='explanation verbose'><table>" % force_escape(k))
for inner_k, inner_v in v.items():
label = "%s.%s" % (k, inner_k)
if isinstance(inner_v, dict):
self.render_row(results, "Too many nested dicts", "stopping at %s" % (label,))
else:
self.render_item(results, label, inner_v)
results.append("</table></td></tr>")
else:
self.render_item(results, k, v)
results.append("</table>")
return "\n".join(results)
| Python | 0.000304 | @@ -2835,19 +2835,102 @@
(o,
-(basestring
+str):%0A return force_escape(unicode(o, 'utf-8'))%0A elif isinstance(o, (unicode
, in
|
35ed2dae541e7a0f3624c063f738f1617f502af0 | Correct service usage in IE Binding. Fixes #7749 | py/selenium/webdriver/ie/webdriver.py | py/selenium/webdriver/ie/webdriver.py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .options import Options
DEFAULT_TIMEOUT = 30
DEFAULT_PORT = 0
DEFAULT_HOST = None
DEFAULT_LOG_LEVEL = None
DEFAULT_SERVICE_LOG_PATH = None
class WebDriver(RemoteWebDriver):
""" Controls the IEServerDriver and allows you to drive Internet Explorer """
def __init__(self, executable_path='IEDriverServer.exe', capabilities=None,
port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, host=DEFAULT_HOST,
log_level=DEFAULT_LOG_LEVEL, service_log_path=DEFAULT_SERVICE_LOG_PATH,
options=None, service=None,
desired_capabilities=None, keep_alive=False):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - Deprecated: path to the executable. If the default is used it assumes the executable is in the $PATH
- capabilities - Deprecated: capabilities Dictionary object
- port - Deprecated: port you would like the service to run, if left as 0, a free port will be found.
- timeout - Deprecated: no longer used, kept for backward compatibility
- host - Deprecated: IP address for the service
- log_level - Deprecated: log level you would like the service to run.
- service_log_path - Deprecated: target of logging of service, may be "stdout", "stderr" or file path.
- options - IE Options instance, providing additional IE options
- desired_capabilities - Deprecated: alias of capabilities; this will make the signature consistent with RemoteWebDriver.
- keep_alive - Whether to configure RemoteConnection to use HTTP keep-alive.
"""
if executable_path != 'IEDriverServer.exe':
warnings.warn('executable_path has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
if capabilities is not None:
warnings.warn('capabilities has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
if port != DEFAULT_PORT:
warnings.warn('port has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
self.port = port
if timeout != DEFAULT_TIMEOUT:
warnings.warn('timeout has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
if host != DEFAULT_HOST:
warnings.warn('host has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
self.host = host
if log_level != DEFAULT_LOG_LEVEL:
warnings.warn('log_level has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
if service_log_path != DEFAULT_SERVICE_LOG_PATH:
warnings.warn('service_log_path has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
# If both capabilities and desired capabilities are set, ignore desired capabilities.
if capabilities is None and desired_capabilities:
capabilities = desired_capabilities
if options is None:
if capabilities is None:
capabilities = self.create_options().to_capabilities()
else:
if capabilities is None:
capabilities = options.to_capabilities()
else:
# desired_capabilities stays as passed in
capabilities.update(options.to_capabilities())
if service is None:
service = Service()
self.iedriver = Service(
executable_path,
port=self.port,
host=self.host,
log_level=log_level,
log_file=service_log_path)
self.iedriver.start()
RemoteWebDriver.__init__(
self,
command_executor='http://localhost:%d' % self.port,
desired_capabilities=capabilities,
keep_alive=keep_alive)
self._is_remote = False
def quit(self):
RemoteWebDriver.quit(self)
self.iedriver.stop()
def create_options(self):
return Options()
| Python | 0 | @@ -4639,24 +4639,28 @@
service is
+not
None:%0A
@@ -4671,26 +4671,48 @@
se
-rvice = S
+lf.iedriver = s
ervice
-()
%0A
+ else:%0A
@@ -4752,16 +4752,20 @@
+
executab
@@ -4781,24 +4781,28 @@
+
port=self.po
@@ -4797,32 +4797,36 @@
port=self.port,%0A
+
host
@@ -4841,32 +4841,36 @@
st,%0A
+
log_level=log_le
@@ -4874,16 +4874,20 @@
_level,%0A
+
|
5aca45a68a229f43a25dd97d2c680716c9baabf5 | add travis env to sgen | scripts/sgen.py | scripts/sgen.py | #!/usr/bin/python
# Generate original static file to another with new prefix
# ./sgen index.html old_prefix static_index.html new_prefix
import sys
from os import walk, path
# File lists
# The two file lists should be aligned.
files = []
for (dirpath, dirname, filenames) in walk("../static"):
for f in filenames:
if ".html" in f:
files.append(dirpath + "/" + f)
# prefix of target files
target_prefix = "../docs"
target_files = []
for f in files:
target_files.append(f.replace("../static", target_prefix))
print(target_files)
# Variables of parsing
def parse_args():
if len(sys.argv) < 3:
print ("Not enough arguments")
exit(1)
original_prefix = sys.argv[1]
new_prefix = sys.argv[2]
# unsafe checkout prefix
if original_prefix[0] != 'h' or original_prefix[-1] != '/' or new_prefix[0] != 'h' or new_prefix[-1] != '/':
print ("Seems something wrong on the prefix")
exit(1)
return original_prefix, new_prefix
def sgen():
original_prefix, new_prefix = parse_args()
# parse the publications_ref into the appropriate html format
for i in range(len(files)):
with open(files[i]) as f:
content = f.read()
new_content = content.replace(original_prefix, new_prefix)
with open(target_files[i], "w+") as f:
f.write(new_content)
sgen() | Python | 0 | @@ -169,16 +169,25 @@
lk, path
+, environ
%0A%0A# File
@@ -234,16 +234,51 @@
igned.%0A%0A
+root = environ%5B'TRAVIS_BUILD_DIR'%5D%0A
files =
@@ -322,19 +322,25 @@
in walk(
-%22..
+ root + %22
/static%22
@@ -477,19 +477,24 @@
refix =
-%22..
+root + %22
/docs%22%0At
@@ -564,11 +564,17 @@
ace(
-%22..
+ root + %22
/sta
@@ -1408,12 +1408,13 @@
ent)%0A%0Asgen()
+%0A
|
62184bf35db9fe4bf397211d46f1dee43096f9fb | Clarify method with a docstring | guild/plugins/tensorflow_util.py | guild/plugins/tensorflow_util.py | # Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import time
from guild import python_util
from guild import util
from guild.plugin import Plugin
class SummaryPlugin(Plugin):
"""Summary plugin base class.
Summary plugins log additional summary values (e.g. GPU usage,
etc.) per logged summary. This class is used to patch the TF env
to handle `add_summary` of `tensorflow.summary.FileWriter` and of
`tensorboardX.writer.SummaryToEventTransformer`.
"""
MIN_SUMMARY_INTERVAL = 5
def __init__(self, ep):
super(SummaryPlugin, self).__init__(ep)
self._summary_cache = SummaryCache(self.MIN_SUMMARY_INTERVAL)
def patch_env(self):
self._try_patch_tensorflow()
def _try_patch_tensorflow(self):
util.try_apply([
self._try_listen_tf1,
self._try_listen_tf2,
self._log_listen_failed])
def _try_listen_tf1(self):
try:
from tensorflow.summary import FileWriter
except Exception as e:
self.log.debug(
"error importing tensorflow.summary.FileWriter: %s", e)
raise util.TryFailed()
else:
self.log.debug(
"wrapping tensorflow.summary.FileWriter.add_summary")
python_util.listen_method(
FileWriter, "add_summary",
self._handle_summary)
def _try_listen_tf2(self):
try:
from tensorboard.plugins.scalar import summary_v2
from tensorflow import summary
except Exception as e:
self.log.debug(
"error importing tensorboard.plugins.scalar.summary_v2: %s", e)
raise util.TryFailed()
else:
self.log.debug(
"wrapping tensorboard.plugins.scalar.summary_v2.scalar")
python_util.listen_function(
summary_v2, "scalar",
self._handle_scalar)
python_util.listen_function(
summary, "scalar",
self._handle_scalar)
def _log_listen_failed(self):
self.log.warning(
"unable to find TensorFlow summary writer, skipping "
"summaries for %s", self.name)
def _handle_summary(self, add_summary, _summary, global_step=None):
"""Callback to apply summary values via add_summary callback.
This is the TF 1.x API for logging scalars.
See SummaryPlugin docstring above for background.
"""
vals = self._summary_values(global_step)
if vals:
self.log.debug("summary values via add_summary: %s", vals)
summary = tf_scalar_summary(vals)
add_summary(summary, global_step)
def _summary_values(self, global_step):
if self._summary_cache.expired():
self.log.debug("reading summary values")
try:
vals = self.read_summary_values(global_step)
except:
self.log.exception("reading summary values")
vals = {}
self._summary_cache.reset_for_step(global_step, vals)
return self._summary_cache.for_step(global_step)
def _handle_scalar(self, scalar, name, data, step=None, description=None):
"""Callback to apply summary values via scalars API.
This is the TF 2.x API for logging scalars.
"""
# pylint: disable=unused-argument
vals = self._summary_values(step)
if vals:
self.log.debug("summary values via scalar: %s", vals)
for tag, val in vals.items():
if val is None:
continue
scalar(tag, val, step)
@staticmethod
def read_summary_values(_global_step):
return {}
def tf_scalar_summary(vals):
# pylint: disable=import-error,no-name-in-module
from tensorflow.core.framework.summary_pb2 import Summary
return Summary(value=[
Summary.Value(tag=key, simple_value=val)
for key, val in vals.items()
])
class SummaryCache(object):
def __init__(self, timeout):
self._timeout = timeout
self._expires = None
self._step = None
self._val = None
def expired(self):
return self._expires is None or time.time() >= self._expires
def reset_for_step(self, step, val):
self._expires = time.time() + self._timeout
self._step = step
self._val = val
def for_step(self, step):
return self._val if step == self._step else None
| Python | 0.000066 | @@ -4334,32 +4334,72 @@
(_global_step):%0A
+ %22%22%22Overridden by subclasses.%22%22%22%0A
return %7B
|
24cebbd351875103067162733cf682320df29cf6 | Update VMfileconvert_V2.py | pyecog/light_code/VMfileconvert_V2.py | pyecog/light_code/VMfileconvert_V2.py | import glob, os, numpy
import stfio
def main():
searchpath = os.getcwd()
exportdirectory = searchpath+'/ConvertedFiles/'
# Make export directory
if not os.path.exists(exportdirectory):
os.makedirs(exportdirectory)
# Walk through and find abf files
pattern = '*.abf'
datafilenames = glob.glob(pattern)
if datafilenames:
for filename in datafilenames:
print ('Converting '+str(filename))
data = stfio.read(filename,ftype = "abf")
x = data.aspandas()
x = x.values
numpy.save(exportdirectory+filename[0:-4],x)
if __name__ == '__main__':
main()
| Python | 0 | @@ -15,17 +15,109 @@
s, numpy
-%0A
+, sys%0Atry:%0A import stfio%0Aexcept:%0A sys.path.append('C:%5CPython27%5CLib%5Csite-packages')%0A
import s
|
52cbd272ec08a382b4f16dca1579a3ef72365069 | use numpy mean | examples/train_multi_gpu.py | examples/train_multi_gpu.py | '''
Created on Feb 6, 2017
@author: julien
'''
import numpy
from os.path import join
import tempfile
from keras.metrics import categorical_accuracy
from examples.ga.dataset import get_reuters_dataset
from minos.experiment.experiment import Experiment, ExperimentParameters
from minos.experiment.training import Training, AccuracyDecreaseStoppingCondition,\
get_associated_validation_metric
from minos.model.design import create_random_blueprint
from minos.model.model import Objective, Optimizer, Metric, Layout
from minos.train.trainer import ModelTrainer
from minos.utils import setup_console_logging
import numpy as np
np.random.seed(1337)
def create_experiment(input_size, output_size, batch_size):
training = Training(
objective=Objective('categorical_crossentropy'),
optimizer=Optimizer(optimizer='Adam'),
metric=Metric('categorical_accuracy'),
stopping=AccuracyDecreaseStoppingCondition(
metric='categorical_accuracy',
min_epoch=5,
max_epoch=25,
noprogress_count=5),
batch_size=batch_size)
parameters = ExperimentParameters(use_default_values=True)
layout = Layout(
input_size=input_size,
output_size=output_size,
output_activation='softmax')
experiment = Experiment(
label='reuters_train_multi_gpu',
layout=layout,
training=training,
parameters=parameters)
return experiment
def train_multi_gpu(max_words = 1000, batch_size=32):
batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(batch_size, max_words)
experiment = create_experiment(max_words, nb_classes, batch_size)
blueprint = create_random_blueprint(experiment)
devices = ['/gpu:0', '/gpu:1']
trainer = ModelTrainer(batch_iterator, test_batch_iterator)
with tempfile.TemporaryDirectory() as tmp_dir:
model, history, _duration = trainer.train(
blueprint,
devices,
save_best_model=True,
model_filename=join(tmp_dir, 'model'))
metric = get_associated_validation_metric(blueprint.training.metric.metric)
epoch = numpy.argmax(history.history[metric])
score = history.history[metric][epoch]
print('Final training score %r after %d epoch' % (score, epoch))
test_size = 10
y_true = test_batch_iterator.y[0][:test_size]
y_pred = model.predict(test_batch_iterator.X[0][:test_size])
print('Predictions (true, pred) %r' % list(zip(y_true.tolist(), y_pred.tolist())))
evaluation = numpy.mean(numpy.argmax(y_true)==numpy.argmax(y_pred))
print('Final evaluation score %f' % evaluation)
def main():
setup_console_logging('DEBUG')
train_multi_gpu()
if __name__ == '__main__':
main()
| Python | 0.000225 | @@ -2362,16 +2362,29 @@
_true =
+numpy.argmax(
test_bat
@@ -2411,16 +2411,17 @@
st_size%5D
+)
%0A
@@ -2430,16 +2430,29 @@
_pred =
+numpy.argmax(
model.pr
@@ -2498,98 +2498,8 @@
ze%5D)
-%0A print('Predictions (true, pred) %25r' %25 list(zip(y_true.tolist(), y_pred.tolist()))
)%0A
@@ -2532,51 +2532,23 @@
ean(
-numpy.argmax(
y_true
-)
==
-numpy.argmax(
y_pred)
-)
%0A
@@ -2604,16 +2604,111 @@
on)%0A
+ print('Predictions (true, pred) %25r' %25 list(zip(y_true.tolist(), y_pred.tolist())))%0A
%0Adef mai
|
a49ddd64758b23565870439bda36fafd5e1dac39 | Put survey back otherwise | gwells/urls.py | gwells/urls.py | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from . import views
from gwells.views import *
from gwells.views.admin import *
from django.views.generic import TemplateView
# Creating 2 versions of the app_root. One without and one with trailing slash
# This will allow for any or no additional app_root context to be provided
app_root = settings.APP_CONTEXT_ROOT
if app_root:
app_root_slash = app_root + '/'
else:
app_root_slash = app_root
urlpatterns = [
# url(r'^'+ app_root +'$', views.HomeView.as_view(), name='home'),
url(r'^'+ app_root_slash +'robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name='robots'),
url(r'^'+ app_root_slash +'$', SearchView.well_search, name='home'),
url(r'^'+ app_root_slash +'search$', SearchView.well_search, name='search'),
# url(r'^(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$', views.DetailView.as_view(), name='detail'),
url(r'^'+ app_root_slash +'well/(?P<pk>[0-9]+)$', WellDetailView.as_view(), name='well_detail'),
url(r'^'+ app_root_slash +'registry-legacy$', RegistryView.as_view(), name='registry-legacy'),
url(r'^'+ app_root_slash +'submission/(?P<pk>[0-9]+)$', ActivitySubmissionDetailView.as_view(), name='activity_submission_detail'),
url(r'^'+ app_root_slash +'health$', HealthView.health, name='health'),
url(r'^'+ app_root_slash +'groundwater-information', TemplateView.as_view(template_name='gwells/groundwater_information.html'), name='groundwater_information'),
url(r'^'+ app_root_slash +'ajax/map_well_search/$', SearchView.map_well_search, name='map_well_search'),
url(r'^'+ app_root_slash +'registries/', include('registries.urls')),
]
if settings.ENABLE_DATA_ENTRY:
urlpatterns = [
url(r'^'+ app_root_slash +'submission/$', ActivitySubmissionListView.as_view(), name='activity_submission_list'),
url(r'^'+ app_root_slash +'submission/create$', ActivitySubmissionWizardView.as_view(views.FORMS), name='activity_submission_create'),
url(r'^'+ app_root_slash +'site_admin', AdminView.as_view(), name='site_admin'),
url(r'^'+ app_root_slash +'admin/survey/(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$', SurveyView.as_view(), name='survey'),
] + urlpatterns
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^'+ app_root_slash +'admin/', include(admin.site.urls)),
] + urlpatterns
| Python | 0 | @@ -2745,24 +2745,112 @@
te_admin'),%0A
+ url(r'%5E'+ app_root_slash +'admin/survey', SurveyView.as_view(), name='survey'),%0A
url(
|
393bde7e7f3902f734e8c01f265b216f2d3eef26 | remove leftover | models/dulsine_commons.py | models/dulsine_commons.py | # -*- coding: utf-8 -*-
# vim: set ts=4
# Common enumerations used in some places
CIVILITES = (
('M.', 'Monsieur'),
('Mme', 'Madame'),
('Mlle', 'Mademoiselle')
)
CIRCUITS = (
('O', 'ouvert'),
('F', 'ferme'),
('N', 'pas de circuit')
)
TYPES_ACTEURS = (
('P', 'Professionnels'),
('A', 'Amateurs'),
('M', 'Mixte')
)
TEAM_TYPES = (
(0, 'PAPS'),
(1, 'Equipe'),
(2, 'Binome'),
(3, 'Equipe d\'Evacuation')
)
DIPLOME_SECOURS = (
(0, 'N.D.'),
(1, 'CI'),
(2, 'PSE2'),
(3, 'PSE1'),
(4, 'PSC1'),
(5, 'IPS'),
(6, 'CDPE')
)
NOT_AVAILABLE = 0
DIPLOME_CI = 1
DIPLOME_PSE2 = 2
DIPLOME_PSE1 = 3
DIPLOME_PSC1 = 4
DIPLOME_CONDUCTEURS = (
(10, 'CH'),
(11, 'CHA'),
(12, '4x4')
)
DIPLOME_FORMATEURS = (
(20, 'FCI'),
(21, 'PAE1'),
(22, 'PAE2'),
(23, 'PAE3'),
(24, 'PAE4'),
)
FORMATIONS = DIPLOME_SECOURS + DIPLOME_CONDUCTEURS + DIPLOME_FORMATEURS
WISH_ND = 0
WISH_CHOICES = (
(WISH_ND, 'N.D.'),
(1, 'Disponible'),
(2, 'Intéressé'),
(3, 'Très intéressé'),
)
| Python | 0.000017 | @@ -492,44 +492,105 @@
OME_
-SECOURS = (%0A (0, 'N.D.'),%0A (1,
+CI = 0%0ADIPLOME_PSE2 = 1%0ADIPLOME_PSE1 = 2%0ADIPLOME_PSC1 = 3%0A%0ADIPLOME_SECOURS = (%0A (DIPLOME_CI,
'CI
@@ -594,24 +594,35 @@
'CI'),%0A (
+DIPLOME_PSE
2, 'PSE2'),%0A
@@ -626,17 +626,28 @@
),%0A (
-3
+DIPLOME_PSE1
, 'PSE1'
@@ -654,17 +654,28 @@
),%0A (
-4
+DIPLOME_PSC1
, 'PSC1'
@@ -686,9 +686,9 @@
(
-5
+4
, 'I
@@ -703,9 +703,9 @@
(
-6
+5
, 'C
@@ -717,92 +717,8 @@
%0A)%0A%0A
-NOT_AVAILABLE = 0%0ADIPLOME_CI = 1%0ADIPLOME_PSE2 = 2%0ADIPLOME_PSE1 = 3%0ADIPLOME_PSC1 = 4%0A
%0ADIP
|
f9a99102a7053e444021926d08750f04a662fd9f | remove unnecessary print statements | pyspecdata/load_files/open_subpath.py | pyspecdata/load_files/open_subpath.py | from ..core import *
from ..datadir import dirformat
import os.path
from zipfile import ZipFile
def open_subpath(file_reference,*subpath,**kwargs):
"""
Parameters
----------
file_reference: str or tuple
If a string, then it's the name of a directory.
If it's a tuple, then, it has three elements: the ZipFile object, the
filename of the zip file (for reference), and the name of the file we're interested in
within the zip file.
test_only: bool
just test if the path exists
"""
mode,test_only = process_kwargs([('mode','r'),
('test_only',False)],kwargs)
if isinstance(file_reference,basestring):
if test_only:
print "testing",(file_reference,) + subpath
full_path = os.path.join(file_reference, *subpath)
if os.path.exists(full_path):
return True
else:
return False
else:
fp = open(os.path.join(file_reference,*subpath),mode)
else:
if type(file_reference) == tuple:
if len(file_reference) == 3 and type(file_reference[0]) is ZipFile:
zf = file_reference[0]
zip_basename = file_reference[1]
name_inside_zip = file_reference[2]
subfile = '/'.join((name_inside_zip,)+subpath)
if test_only:
if subfile in zf.namelist():
return True
else:
return False
if subfile in zf.namelist():
return zf.open(subfile)
else:
raise ValueError(subfile+" not found in zip file")
else:
raise ValueError("open_subpath doesn't understand the format of the tuple passe to file_reference")
else:
raise ValueError("open_subpath doesn't understand the type of the file_reference")
return fp
| Python | 0.999982 | @@ -696,64 +696,8 @@
ly:%0A
- print %22testing%22,(file_reference,) + subpath%0A
|
ba370231fe80280dec806c7c2515061e8607b360 | Add SCA into mbio | Correlation/__init__.py | Correlation/__init__.py | __author__ = 'Wenzhi Mao'
__all__ = []
def _Startup():
from mbio import _ABSpath
global _path__
_path__ = _ABSpath()
from os import path
Clist = ['mi.c', 'omes.c']
for c in Clist:
if not path.exists(_path__+'/'+c.replace('.c', '_c.so')):
from mbio import _make
_make(_path__+'/'+c)
_Startup()
from . import MI
from .MI import *
__all__.extend(MI.__all__)
from . import OMES
from .OMES import *
__all__.extend(OMES.__all__)
| Python | 0.99929 | @@ -470,16 +470,81 @@
d(OMES.__all__)%0A
+%0Afrom . import SCA%0Afrom .SCA import *%0A__all__.extend(SCA.__all__)
|
2277c82efdc456e5873987eabac88810b2cece5b | Fix pep8 whitespace violation. | lms/djangoapps/courseware/features/video.py | lms/djangoapps/courseware/features/video.py | #pylint: disable=C0111
from lettuce import world, step
from common import *
############### ACTIONS ####################
@step('when I view it it does autoplay')
def does_autoplay(step):
assert(world.css_find('.video')[0]['data-autoplay'] == 'True')
@step('the course has a Video component')
def view_video(step):
coursename = TEST_COURSE_NAME.replace(' ', '_')
i_am_registered_for_the_course(step, coursename)
# Make sure we have a video
video = add_video_to_course(coursename)
chapter_name = TEST_SECTION_NAME.replace(" ", "_")
section_name = chapter_name
url = django_url('/courses/edx/Test_Course/Test_Course/courseware/%s/%s' %
(chapter_name, section_name))
world.browser.visit(url)
def add_video_to_course(course):
template_name = 'i4x://edx/templates/video/default'
world.ItemFactory.create(parent_location=section_location(course),
template=template_name,
display_name='Video')
| Python | 0.000003 | @@ -422,20 +422,16 @@
sename)%0A
-
%0A # M
@@ -716,20 +716,16 @@
_name))%0A
-
%0A wor
|
54b0feebb18816a936f4a7f323a77808f9973eb2 | Update testes.py | Src/testes.py | Src/testes.py | import jogovelha
import sys
erroInicializar = False
jogo = jogovelha.inicializar()
if len(jogo) != 3:
erroInicializar = True
else:
for linha in jogo:
if len(linha) != 3:
erroInicializar = True
else:
for elemento in linha:
if elemento != ".":
erroInicializar = True
if erroInicializar:
sys.exit(1)
else:
sys.exit(0) | Python | 0 | @@ -297,17 +297,17 @@
nto != %22
-.
+X
%22:%0A
@@ -401,8 +401,9 @@
.exit(0)
+%0A
|
582cc4ebe6f8177c407ce0ef1f5b9e1b23226b80 | Version 0.2: support for python3 | vlogging/__init__.py | vlogging/__init__.py | # -*- coding: utf-8 -*-
from io import BytesIO as StringIO
from string import Template
import base64
__version__ = "0.1"
renderers = []
try:
import cv2
import numpy
def render_opencv(img, fmt="png"):
if not isinstance(img, numpy.ndarray):
return None
retval, buf = cv2.imencode(".%s" % fmt, img)
if not retval:
return None
return buf, "image/%s" % fmt
renderers.append(render_opencv)
except ImportError:
pass
try:
from PIL import Image
def render_pil(img, fmt="png"):
if not callable(getattr(img, "save", None)):
return None
output = StringIO()
img.save(output, format=fmt)
contents = output.getvalue()
output.close()
return contents, "image/%s" % fmt
renderers.append(render_pil)
except ImportError:
pass
try:
import pylab
def render_pylab(img, fmt="png"):
if not callable(getattr(img, "savefig", None)):
return None
output = StringIO()
img.savefig(output, format=fmt)
contents = output.getvalue()
output.close()
return contents, "image/%s" % fmt
renderers.append(render_pylab)
except ImportError:
pass
class VisualRecord(object):
def __init__(self, title="", imgs=None, footnotes="", fmt="png"):
self.title = title
self.fmt = fmt
if imgs is None:
imgs = []
self.imgs = imgs
if not isinstance(imgs, (list, tuple, set, frozenset)):
self.imgs = [self.imgs]
self.footnotes = footnotes
def render_images(self):
rendered = []
for img in self.imgs:
for renderer in renderers:
# Trying renderers we have one by one
res = renderer(img, self.fmt)
if res is None:
continue
else:
rendered.append(res)
break
return "".join(
Template('<img src="data:$mime;base64,$data" />').substitute({
"data": base64.b64encode(data),
"mime": mime
}) for data, mime in rendered)
def render_footnotes(self):
if not self.footnotes:
return ""
return Template("<pre>$footnotes</pre>").substitute({
"footnotes": self.footnotes
})
def __str__(self):
t = Template(
"""
<h4>$title</h4>
$imgs
$footnotes
<hr/>""")
return t.substitute({
"title": self.title,
"imgs": self.render_images(),
"footnotes": self.render_footnotes()
})
| Python | 0.000003 | @@ -117,9 +117,9 @@
%220.
-1
+2
%22%0Are
|
73ceff96b2f065517a7d67cb0b25361f5bd61388 | Delete fixture after running tests | src/gramcore/filters/tests/test_edges.py | src/gramcore/filters/tests/test_edges.py | """Tests for module gramcore.filters.edges"""
import os
import numpy
from PIL import Image, ImageDraw
from nose.tools import assert_equal
from skimage import io
from gramcore.filters import edges
def setup():
"""Create image fixture
The background color is set by default to black (value == 0).
.. note::
Although the rectangle should be 10x10 in reality it returns an 11x11.
If the image is read with io.imread, then the colored pixels and their
neighbours can be accessed with arr[9:22, 4:17].
"""
img = Image.new('L', (20, 40))
draw = ImageDraw.Draw(img)
draw.rectangle([(5, 10), (15, 20)], fill=255)
img.save('white-square.tif')
del draw
def teardown():
"""Delete fixture"""
#os.remove('white-square.tif')
def test_canny():
"""Apply canny to grey image and check return values
.. warning::
This seems to produce some artifacts. The fixture is a black
image with a white 11x11 rectangle. Thus you expect you get 44 (4*11)
pixels of edges. Instead it gets 50, when sigma is 1 and 40 when sigma
is 2. In both cases the shape is not correct.
"""
img = io.imread('white-square.tif')
parameters = {'data': [img], 'sigma': 1.0}
result = edges.canny(parameters)
# this should be 44 check the resulting image with
#result *= 255
#io.imsave('result.tif', result)
assert_equal(result.sum(), 50)
def test_prewitt():
"""Apply prewitt to grey image and check return values
.. note::
This produces correct shape though it shrinks it by 2 pixels, there
are no edge pixels on the corners and each edge has a width of 2
pixels. Based on the original rectangle size, which is 11x11, and the
above issues it returns 4*9*2 = 72 edge pixels.
"""
img = io.imread('white-square.tif')
parameters = {'data': [img]}
result = edges.prewitt(parameters)
result = result.astype('uint8')
assert_equal(result.sum(), 72)
def test_sobel():
"""Apply sobel to grey image and check return values
.. note::
This produces correct shape though it shrinks it by 2 pixels and each
edge has a width of 2 pixels. Based on the original rectangle size,
which is 11x11, and the above issues it returns 4*9*2 + 4 = 76 edge
pixels.
"""
img = io.imread('white-square.tif')
parameters = {'data': [img]}
result = edges.sobel(parameters)
result = result.astype('uint8')
assert_equal(result.sum(), 76)
| Python | 0 | @@ -752,17 +752,16 @@
%22%22%22%0A
-#
os.remov
@@ -1442,17 +1442,16 @@
, 50)%0A%0A%0A
-%0A
def test
|
f949f2fe0e0c66b850700ae56925598ced8fc260 | Disable Sentry logging | app/main.py | app/main.py | import newrelic.agent
newrelic.agent.initialize()
from os import path
from raven.contrib.tornado import AsyncSentryClient
import tornado.ioloop
import tornado.locale
import tornado.web
import tornado.httpserver
import tornado.options
import os
import yaml
import logging
import string
import datetime, time
from main.helper import *
from main.db import *
# global variables (and list of all used environment variables)
APP_VERSION = os.getenv('VERSION', tornado.version)
APP_DEBUG = str(os.getenv('DEBUG', 'false')).lower() == 'true'
APP_PORT = os.getenv('PORT', 3000)
APP_COOKIE_DOMAIN = os.getenv('COOKIE_DOMAIN', '.entu.ee')
APP_AUTH_URL = os.getenv('AUTH_URL', 'https://auth.entu.ee')
APP_MONGODB = os.getenv('MONGODB', 'mongodb://entu_mongodb:27017/')
APP_MYSQL_HOST = os.getenv('MYSQL_HOST', 'localhost')
APP_MYSQL_DATABASE = os.getenv('MYSQL_DATABASE')
APP_MYSQL_USER = os.getenv('MYSQL_USER')
APP_MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD')
APP_MYSQL_SSL_PATH = os.getenv('MYSQL_SSL_PATH')
APP_CUSTOMERGROUP = os.getenv('CUSTOMERGROUP')
# APP_SENTRY = os.getenv('SENTRY_DSN')
APP_INTERCOM_KEY = os.getenv('INTERCOM_KEY')
# List of controllers to load.
app_controllers = [
'api.api',
'api.api2',
'api.erply',
'api.websocket',
'entity.csv_import',
'entity.entity',
'importers.cmdi',
'library.ester',
'library.photo',
'main.config',
'main.status',
'public.public',
'update.update',
'user.auth',
'user.user',
]
class MainPage(myRequestHandler):
"""
Redirects / to site's default path.
"""
def get(self):
self.redirect(self.app_settings('path'))
class PageNotFound(myRequestHandler):
"""
"""
def get(self, page=None):
self.missing()
class myApplication(tornado.web.Application):
"""
Main Application handler. Imports controllers, settings, translations.
"""
def __init__(self):
# load settings
settings = {
'port': APP_PORT,
'debug': APP_DEBUG,
'template_path': path.join(path.dirname(__file__), '..', 'app'),
'static_path': path.join(path.dirname(__file__), '..', 'static'),
'xsrf_coocies': True,
'login_url': '/auth',
'auth_url': APP_AUTH_URL,
'cookie_domain': APP_COOKIE_DOMAIN,
'intercom_key': APP_INTERCOM_KEY,
'start_time': time.time(),
'request_count': 0,
'request_time': 0,
'slow_request_count': 0,
'slow_request_time': 0,
'slow_request_ms': 1000,
'mongodb': APP_MONGODB,
'database-host': APP_MYSQL_HOST,
'database-database': APP_MYSQL_DATABASE,
'database-user': APP_MYSQL_USER,
'database-password': APP_MYSQL_PASSWORD,
'database-ssl-path': APP_MYSQL_SSL_PATH,
'customergroup': APP_CUSTOMERGROUP,
'mongodbs': {},
'databases': {},
}
# load handlers
handlers = [(r'/', MainPage)]
for controller in app_controllers:
c = __import__ (controller, globals(), locals(), ['*'], -1)
handlers.extend(c.handlers)
for h in c.handlers:
settings.setdefault('paths', {}).setdefault('%s.py' % controller, []).append(h[0])
handlers.append((r'(.*)', PageNotFound))
logging.warning('Tornado %s started to listen %s' % (APP_VERSION, APP_PORT))
# init application
# logging.debug('App settings:\n%s' % yaml.safe_dump(settings, default_flow_style=False, allow_unicode=True))
tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == '__main__':
tornado.options.parse_command_line()
tornado.locale.load_translations(path.join(path.dirname(__file__), 'main', 'translation'))
application = myApplication()
if APP_SENTRY:
application.sentry_client = AsyncSentryClient(dsn=APP_SENTRY, release=APP_VERSION)
server = tornado.httpserver.HTTPServer(application, xheaders=True, max_body_size=1024*1024*1024*5)
if APP_DEBUG:
server.listen(APP_PORT)
else:
server.bind(APP_PORT)
server.start(0)
tornado.ioloop.IOLoop.current().start()
| Python | 0 | @@ -1096,18 +1096,16 @@
GROUP')%0A
-#
APP_SENT
@@ -1108,32 +1108,39 @@
SENTRY =
+ None #
os.getenv('SENT
|
bd8caf6ab48bb1fbefdced7f33edabbdf017894a | Change of names | Demo/sockets/echosvr.py | Demo/sockets/echosvr.py | #! /usr/local/python
# Python implementation of an 'echo' tcp server: echo all data it receives.
#
# This is the simplest possible server, sevicing a single request only.
import sys
from socket import *
# The standard echo port isn't very useful, it requires root permissions!
# ECHO_PORT = 7
ECHO_PORT = 50000 + 7
BUFSIZE = 1024
def main():
if len(sys.argv) > 1:
port = int(eval(sys.argv[1]))
else:
port = ECHO_PORT
s = socket(AF_INET, SOCK_STREAM)
s.bind('', port)
s.listen(0)
conn, (host, remoteport) = s.accept()
print 'connected by', host, remoteport
while 1:
data = conn.recv(BUFSIZE)
if not data:
break
conn.send(data)
main()
| Python | 0.001373 | @@ -493,16 +493,22 @@
%09conn, (
+remote
host, re
@@ -553,16 +553,22 @@
ed by',
+remote
host, re
|
ef08f120adce7a130756d3c0505125d3acf9b8ad | Test pulse clipping fix Osx #1. (#3144) | qiskit/pulse/commands/sample_pulse.py | qiskit/pulse/commands/sample_pulse.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Sample pulse.
"""
from typing import Callable, Union, List, Optional
import numpy as np
from qiskit.pulse.channels import PulseChannel
from qiskit.pulse.exceptions import PulseError
from .instruction import Instruction
from .command import Command
class SamplePulse(Command):
"""Container for functional pulse."""
prefix = 'p'
def __init__(self, samples: Union[np.ndarray, List[complex]], name: Optional[str] = None,
epsilon: float = 1e-6):
"""Create new sample pulse command.
Args:
samples: Complex array of pulse envelope
name: Unique name to identify the pulse
epsilon: Pulse sample norm tolerance for clipping.
If any sample's norm exceeds unity by less than or equal to epsilon
it will be clipped to unit norm. If the sample
norm is greater than 1+epsilon an error will be raised
"""
super().__init__(duration=len(samples))
samples = np.asarray(samples, dtype=np.complex_)
self._samples = self._clip(samples, epsilon=epsilon)
self._name = SamplePulse.create_name(name)
@property
def samples(self):
"""Return sample values."""
return self._samples
def _clip(self, samples: np.ndarray, epsilon: float = 1e-6):
"""If samples are within epsilon of unit norm, clip sample by reducing norm by (1-epsilon).
If difference is greater than epsilon error is raised.
Args:
samples: Complex array of pulse envelope
epsilon: Pulse sample norm tolerance for clipping.
If any sample's norm exceeds unity by less than or equal to epsilon
it will be clipped to unit norm. If the sample
norm is greater than 1+epsilon an error will be raised
Returns:
np.ndarray: Clipped pulse samples
Raises:
PulseError: If there exists a pulse sample with a norm greater than 1+epsilon
"""
samples_norm = np.abs(samples)
to_clip = (samples_norm > 1.) & (samples_norm <= 1. + epsilon)
if np.any(to_clip):
clip_where = np.argwhere(to_clip)
clipped_samples = np.exp(1j*np.angle(samples[clip_where]), dtype=np.complex_)
samples[clip_where] = clipped_samples
samples_norm[clip_where] = np.abs(clipped_samples)
if np.any(samples_norm > 1.):
raise PulseError('Pulse contains sample with norm greater than 1+epsilon.')
return samples
def draw(self, dt: float = 1, style: Optional['PulseStyle'] = None,
filename: Optional[str] = None, interp_method: Optional[Callable] = None,
scaling: float = 1, interactive: bool = False):
"""Plot the interpolated envelope of pulse.
Args:
dt: Time interval of samples.
style: A style sheet to configure plot appearance
filename: Name required to save pulse image
interp_method: A function for interpolation
scaling: Relative visual scaling of waveform amplitudes
interactive: When set true show the circuit in a new window
(this depends on the matplotlib backend being used supporting this)
Returns:
matplotlib.figure: A matplotlib figure object of the pulse envelope
"""
# pylint: disable=invalid-name, cyclic-import
from qiskit import visualization
return visualization.pulse_drawer(self, dt=dt, style=style, filename=filename,
interp_method=interp_method, scaling=scaling,
interactive=interactive)
def __eq__(self, other: 'SamplePulse'):
"""Two SamplePulses are the same if they are of the same type
and have the same name and samples.
Args:
other: other SamplePulse
Returns:
bool: are self and other equal
"""
return super().__eq__(other) and (self.samples == other.samples).all()
def __hash__(self):
return hash((super().__hash__(), self.samples.tostring()))
def __repr__(self):
return '%s(%s, duration=%d)' % (self.__class__.__name__, self.name, self.duration)
# pylint: disable=arguments-differ
def to_instruction(self, channel: PulseChannel,
name: Optional[str] = None) -> 'PulseInstruction':
return PulseInstruction(self, channel, name=name)
# pylint: enable=arguments-differ
class PulseInstruction(Instruction):
"""Instruction to drive a pulse to an `PulseChannel`."""
def __init__(self, command: SamplePulse, channel: PulseChannel, name: Optional[str] = None):
super().__init__(command, channel, name=name)
| Python | 0 | @@ -1810,33 +1810,33 @@
lon: float = 1e-
-6
+5
):%0A %22%22%22If
@@ -3090,16 +3090,29 @@
oat = 1,
+%0A
style:
@@ -3186,16 +3186,29 @@
= None,
+%0A
interp_
|
89502d8b8b5e81ba57a16d71c895e0192eae6182 | Update for pandas 17 | hetio/stats.py | hetio/stats.py | import pandas
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn
def get_degrees_for_metanode(graph, metanode):
"""
Return a dataframe that reports the degree of each metaedge for
each node of kind metanode.
"""
metanode_to_nodes = graph.get_metanode_to_nodes()
nodes = metanode_to_nodes.get(metanode, [])
rows = list()
for node in nodes:
for metaedge, edges in node.edges.items():
rows.append((str(node), node.name, str(metaedge), len(edges)))
df = pandas.DataFrame(rows, columns=['node_id', 'node_name', 'metaedge', 'degree'])
return df.sort(['node_name', 'metaedge'])
def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
"""
Plots histograms of the degree distribution of each metaedge
incident to the metanode. Each metaedge receives a facet in
a seaborn.FacetGrid.
"""
degree_df = get_degrees_for_metanode(graph, metanode)
grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height)
grid.map(seaborn.distplot, 'degree', kde=False)
grid.set_titles('{col_name}')
return grid
def plot_degrees(graph, path):
"""
Creates a multipage pdf with a page for each metanode showing degree
distributions.
"""
# Temporarily disable `figure.max_open_warning`
max_open = matplotlib.rcParams['figure.max_open_warning']
matplotlib.rcParams['figure.max_open_warning'] = 0
pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
for metanode in graph.metagraph.get_nodes():
grid = plot_degrees_for_metanode(graph, metanode)
grid.savefig(pdf_pages, format='pdf')
pdf_pages.close()
matplotlib.rcParams['figure.max_open_warning'] = max_open
def get_metanode_df(graph):
rows = list()
for metanode, nodes in graph.get_metanode_to_nodes().items():
series = pandas.Series()
series['metanode'] = metanode
series['abbreviation'] = metanode.abbrev
metaedges = set()
for metaedge in metanode.edges:
metaedges |= {metaedge, metaedge.inverse}
series['metaedges'] = sum([not metaedge.inverted for metaedge in metaedges])
series['nodes'] = len(nodes)
series['unconnected_nodes'] = sum(not any(node.edges.values()) for node in nodes)
rows.append(series)
return pandas.DataFrame(rows).sort('metanode')
def get_metaedge_df(graph):
rows = list()
for metaedge, edges in graph.get_metaedge_to_edges().items():
series = pandas.Series()
series['metaedge'] = str(metaedge)
series['abbreviation'] = metaedge.get_abbrev()
series['inverted'] = int(metaedge.inverted)
series['edges'] = len(edges)
series['source_nodes'] = len(set(edge.source for edge in edges))
series['target_nodes'] = len(set(edge.target for edge in edges))
rows.append(series)
return pandas.DataFrame(rows).sort('metaedge')
| Python | 0 | @@ -618,16 +618,23 @@
df.sort
+_values
(%5B'node_
@@ -2418,24 +2418,31 @@
e(rows).sort
+_values
('metanode')
@@ -2987,16 +2987,23 @@
ws).sort
+_values
('metaed
|
4ffc6c6d361f16feed9673373bd768323dc888bf | Add gpu_snapshot object | hoomd/state.py | hoomd/state.py | from collections import defaultdict
from . import _hoomd
from .data import boxdim
from hoomd.snapshot import Snapshot
from hoomd.local_access import LocalSnapshot
def _create_domain_decomposition(device, box):
""" Create a default domain decomposition.
This method is a quick hack to get basic MPI simulations working with
the new API. We will need to consider designing an appropriate user-facing
API to set the domain decomposition.
"""
if not _hoomd.is_MPI_available():
return None
# if we are only running on one processor, we use optimized code paths
# for single-GPU execution
if device.comm.num_ranks == 1:
return None
# create a default domain decomposition
result = _hoomd.DomainDecomposition(device.cpp_exec_conf,
box.getL(),
0,
0,
0,
False)
return result
class State:
R""" Simulation state.
Parameters:
simulation
snapshot
Attributes:
"""
def __init__(self, simulation, snapshot):
self._simulation = simulation
snapshot._broadcast_box()
domain_decomp = _create_domain_decomposition(
simulation.device,
snapshot._cpp_obj._global_box)
if domain_decomp is not None:
self._cpp_sys_def = _hoomd.SystemDefinition(
snapshot._cpp_obj, simulation.device.cpp_exec_conf,
domain_decomp)
else:
self._cpp_sys_def = _hoomd.SystemDefinition(
snapshot._cpp_obj, simulation.device.cpp_exec_conf)
self._groups = defaultdict(dict)
@property
def snapshot(self):
cpp_snapshot = self._cpp_sys_def.takeSnapshot_double()
return Snapshot._from_cpp_snapshot(cpp_snapshot,
self._simulation.device.comm)
@snapshot.setter
def snapshot(self, snapshot):
R""" Re-initializes the system from a snapshot.
Args:
snapshot: The snapshot to initialize the system from.
Snapshots temporarily store system data. Snapshots contain the complete
simulation state in a single object. They can be used to restart a
simulation.
Example use cases in which a simulation may be restarted from a snapshot
include python-script-level Monte-Carlo schemes, where the system state
is stored after a move has been accepted (according to some criterion),
and where the system is re-initialized from that same state in the case
when a move is not accepted.
Example::
system = init.read_xml("some_file.xml")
... run a simulation ...
snapshot = system.take_snapshot(all=True)
...
system.restore_snapshot(snapshot)
Warning:
restore_snapshot() may invalidate force coefficients,
neighborlist r_cut values, and other per type quantities if
called within a callback during a run(). You can restore a
snapshot during a run only if the snapshot is of a previous
state of the currently running system. Otherwise, you need to
use restore_snapshot() between run() commands to ensure that all
per type coefficients are updated properly.
"""
if self._simulation.device.comm.rank == 0:
if len(snapshot.particles.types) != len(self.particle_types):
raise RuntimeError(
"Number of particle types must remain the same")
if len(snapshot.bonds.types) != len(self.bond_types):
raise RuntimeError("Number of bond types must remain the same")
if len(snapshot.angles.types) != len(self.angle_types):
raise RuntimeError(
"Number of angle types must remain the same")
if len(snapshot.dihedrals.types) != len(self.dihedral_types):
raise RuntimeError(
"Number of dihedral types must remain the same")
if len(snapshot.impropers.types) != len(self.improper_types):
raise RuntimeError(
"Number of dihedral types must remain the same")
if len(snapshot.pairs.types) != len(self.special_pair_types):
raise RuntimeError("Number of pair types must remain the same")
self._cpp_sys_def.initializeFromSnapshot(snapshot._cpp_obj)
@property
def types(self):
return dict(particle_types=self.particle_types,
bond_types=self.bond_types,
angle_types=self.angle_types,
dihedral_types=self.dihedral_types,
improper_types=self.improper_types,
special_pair_types=self.special_pair_types
)
@property
def particle_types(self):
return self._cpp_sys_def.getParticleData().getTypes()
@property
def bond_types(self):
return self._cpp_sys_def.getBondData().getTypes()
@property
def angle_types(self):
return self._cpp_sys_def.getAngleData().getTypes()
@property
def dihedral_types(self):
return self._cpp_sys_def.getDihedralData().getTypes()
@property
def improper_types(self):
return self._cpp_sys_def.getImproperData().getTypes()
@property
def special_pair_types(self):
return self._cpp_sys_def.getPairData().getTypes()
@property
def box(self):
b = self._cpp_sys_def.getParticleData().getGlobalBox()
L = b.getL()
return boxdim(Lx=L.x, Ly=L.y, Lz=L.z,
xy=b.getTiltFactorXY(),
xz=b.getTiltFactorXZ(),
yz=b.getTiltFactorYZ(),
dimensions=self._cpp_sys_def.getNDimensions())
# Set the system box
# \param value The new boundaries (a data.boxdim object)
@box.setter
def box(self, value):
if not isinstance(value, boxdim):
raise TypeError('box must be a data.boxdim object')
self._cpp_sys_def.getParticleData().setGlobalBox(value._getBoxDim())
def replicate(self):
raise NotImplementedError
def scale_system(self):
raise NotImplementedError
def get_group(self, filter_):
cls = filter_.__class__
if filter_ in self._groups[cls]:
return self._groups[cls][filter_]
else:
group = _hoomd.ParticleGroup(self._cpp_sys_def, filter_)
self._groups[cls][filter_] = group
return group
@property
def local_snapshot(self):
return LocalSnapshot(self)
| Python | 0.000002 | @@ -6846,8 +6846,258 @@
t(self)%0A
+%0A @property%0A def gpu_snapshot(self):%0A if self._simulation.device.mode != 'gpu':%0A raise RuntimeError(%0A %22Cannot access gpu_snapshot with a non GPU device.%22)%0A else:%0A return LocalSnapshotGPU(self)%0A
|
49839733a7f26070e8d666d91fae177711154e1d | Change histogram_proto to use a custom logger. | tracing/tracing/proto/histogram_proto.py | tracing/tracing/proto/histogram_proto.py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
try:
# Note: from tracing.proto import histogram_pb2 would make more sense here,
# but unfortunately protoc does not generate __init__.py files if you specify
# an out package (at least for the gn proto_library rule).
import histogram_pb2 # pylint:disable=relative-import
HAS_PROTO = True
except ImportError as e:
try:
# crbug/1234919
# Catapult put the generated histogram_pb2.py in the same source folder,
# while the others (e.g., webrtc) put it in output path. By default we
# try to import from the sys.path. Here allows to try import from the
# source folder as well.
logging.warning(
'Failed to import histogram_pb2: %s', repr(e))
from . import histogram_pb2 # pylint:disable=relative-import
logging.warning(
'Retried and successfully imported histogram_pb2: %s', histogram_pb2)
HAS_PROTO = True
except ImportError:
HAS_PROTO = False
def _EnsureProto():
"""Ensures histogram_pb.py is in the PYTHONPATH.
If the assert fails here, it means your script doesn't ensure histogram_pb2.py
is generated and is in the PYTHONPATH. To fix this, depend on the GN rule
in BUILD.gn and ensure the script gets the out/Whatever/pyproto dir in its
PYTHONPATH (for instance by making your script take a --out-dir=out/Whatever
flag).
"""
assert HAS_PROTO, ('Tried to use histogram protos, but missing '
'histogram_pb2.py. Try cd tracing/proto && make.')
def Pb2():
"""Resolves the histogram proto stub.
Where you would use histogram_pb2.X, instead do histogram_proto.Pb2().X.
"""
_EnsureProto()
return histogram_pb2
if HAS_PROTO:
PROTO_UNIT_MAP = {
histogram_pb2.MS: 'ms',
histogram_pb2.MS_BEST_FIT_FORMAT: 'msBestFitFormat',
histogram_pb2.TS_MS: 'tsMs',
histogram_pb2.N_PERCENT: 'n%',
histogram_pb2.SIZE_IN_BYTES: 'sizeInBytes',
histogram_pb2.BYTES_PER_SECOND: 'bytesPerSecond',
histogram_pb2.J: 'J',
histogram_pb2.W: 'W',
histogram_pb2.A: 'A',
histogram_pb2.V: 'V',
histogram_pb2.HERTZ: 'Hz',
histogram_pb2.UNITLESS: 'unitless',
histogram_pb2.COUNT: 'count',
histogram_pb2.SIGMA: 'sigma',
}
UNIT_PROTO_MAP = {v: k for k, v in PROTO_UNIT_MAP.items()}
PROTO_IMPROVEMENT_DIRECTION_MAP = {
histogram_pb2.BIGGER_IS_BETTER: 'biggerIsBetter',
histogram_pb2.SMALLER_IS_BETTER: 'smallerIsBetter',
}
IMPROVEMENT_DIRECTION_PROTO_MAP = {
v: k for k, v in PROTO_IMPROVEMENT_DIRECTION_MAP.items()
}
def UnitFromProto(proto_unit):
_EnsureProto()
direction = proto_unit.improvement_direction
unit = PROTO_UNIT_MAP[proto_unit.unit]
if direction and direction != histogram_pb2.NOT_SPECIFIED:
unit += '_' + PROTO_IMPROVEMENT_DIRECTION_MAP[direction]
return unit
def ProtoFromUnit(unit):
_EnsureProto()
parts = unit.split('_')
assert unit
assert 0 < len(parts) <= 2, ('expected <unit>_(bigger|smaller)IsBetter' +
str(parts))
proto_unit = histogram_pb2.UnitAndDirection()
proto_unit.unit = UNIT_PROTO_MAP[parts[0]]
if len(parts) > 1:
proto_unit.improvement_direction = IMPROVEMENT_DIRECTION_PROTO_MAP[parts[1]]
return proto_unit
| Python | 0.000001 | @@ -199,23 +199,8 @@
ort%0A
-import logging%0A
try:
@@ -810,243 +810,137 @@
-logging.warning(%0A 'Failed to import histogram_pb2: %25s', repr(e))%0A from . import histogram_pb2 # pylint:disable=relative-import%0A logging.warning(%0A 'Retried and successfully imported histogram_pb2: %25s', histogram_pb2)
+# TODO(wenbinzhang): Clean up import paths to work consistently.%0A from . import histogram_pb2 # pylint:disable=relative-import
%0A
|
2b5baf83b52a32c2424d043f987c0ad70bc564d1 | add a log | vmthunder/compute.py | vmthunder/compute.py | #!/usr/bin/env python
import time
import threading
from vmthunder.drivers import fcg
from vmthunder.session import Session
from vmthunder.instance import Instance
from vmthunder.singleton import singleton
from vmthunder.openstack.common import log as logging
from vmthunder.drivers import volt
LOG = logging.getLogger(__name__)
@singleton
class Compute():
def __init__(self):
self.sessions = {}
self.instances = {}
self.cache_group = fcg.create_group()
self.rlock = threading.RLock()
LOG.debug("creating a Compute_node")
def heartbeat(self):
self.rlock.acquire()
try:
self._heartbeat()
except Exception, e:
LOG.error(str(e))
raise e
finally:
self.rlock.release()
def _heartbeat(self):
LOG.debug("VMThunder: heartbeat start @ %s" % time.asctime())
to_delete_sessions = []
for each_key in self.sessions:
LOG.debug("VMThunder: session_name = %s, instances in session = " % self.sessions[each_key].volume_name)
LOG.debug(self.sessions[each_key].vm)
if not self.sessions[each_key].has_vm():
if self.sessions[each_key].destroy():
to_delete_sessions.append(each_key)
for key in to_delete_sessions:
del self.sessions[key]
info = volt.heartbeat()
for each_key in self.sessions:
for session in info:
if self.sessions[each_key].peer_id == session['peer_id']:
self.sessions[each_key].adjust_for_heartbeat(session['parents'])
break
LOG.debug("VMThunder: heartbeat end @ %s" % time.asctime())
def destroy(self, vm_name):
self.rlock.acquire()
try:
self._destroy(vm_name)
except Exception, e:
LOG.error(str(e))
raise e
finally:
self.rlock.release()
def _destroy(self, vm_name):
self.rlock.acquire()
LOG.debug("VMThunder: destroy vm started, vm_name = %s" % (vm_name))
if self.instances.has_key(vm_name):
instance = self.instances[vm_name]
#session = self.sessions[instance.volume_name]
instance.del_vm()
#session.destroy(vm_name)
del self.instances[vm_name]
self.rlock.release()
LOG.debug("VMThunder: destroy vm completed, vm_name = %s" % vm_name)
def list(self):
self.rlock.acquire()
try:
return self._list()
except Exception, e:
LOG.error(str(e))
raise e
finally:
self.rlock.release()
def _list(self):
def build_list_object(instances):
instance_list = []
for instance in instances.keys():
instance_list.append({
'vm_name': instances[instance].vm_name,
})
self.rlock.release()
return build_list_object(self.instances)
def create(self, volume_name, vm_name, image_connection, snapshot_link):
self.rlock.acquire()
try:
return self._create(volume_name, vm_name, image_connection, snapshot_link)
except Exception, e:
LOG.error(str(e))
raise e
finally:
self.rlock.release()
def _create(self, volume_name, vm_name, image_connection, snapshot_link):
#TODO: roll back if failed
if vm_name not in self.instances.keys():
self.rlock.acquire()
LOG.debug("VMThunder: create vm started, volume_name = %s, vm_name = %s" % (volume_name, vm_name))
if not self.sessions.has_key(volume_name):
self.sessions[volume_name] = Session(volume_name)
session = self.sessions[volume_name]
self.instances[vm_name] = Instance.factory(vm_name, session, snapshot_link)
origin_path = session.deploy_image(image_connection)
LOG.debug("origin is %s" % origin_path)
self.instances[vm_name].start_vm(origin_path)
self.rlock.release()
LOG.debug("VMThunder: create vm completed, volume_name = %s, vm_name = %s, snapshot = %s" %
(volume_name, vm_name, self.instances[vm_name].snapshot_path))
return self.instances[vm_name].snapshot_link | Python | 0.000002 | @@ -3431,32 +3431,91 @@
finally:%0D%0A
+ print %22--------release lock-----------------%22%0D%0A
self
|
ed45058149d919d059ff4080a83f613f18269935 | Disable sqlite loader if sqlite3 isn't available for some reason. | trigger/netdevices/loaders/filesystem.py | trigger/netdevices/loaders/filesystem.py | """
Built-in Loader objects for loading `~trigger.netdevices.NetDevice` metadata
from the filesystem.
"""
__author__ = 'Jathan McCollum'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan.mccollum@teamaol.com'
__copyright__ = 'Copyright 2013, AOL Inc.'
__version__ = '1.0'
import itertools
import os
from trigger.conf import settings
from trigger.netdevices.loader import BaseLoader
from trigger import exceptions, rancid
from trigger.exceptions import LoaderFailed
try:
import simplejson as json # Prefer simplejson because of SPEED!
except ImportError:
import json
import sqlite3
import xml.etree.cElementTree as ET
class JSONLoader(BaseLoader):
"""
Wrapper for loading metadata via JSON from the filesystem.
Parse 'netdevices.json' and return list of JSON objects.
"""
is_usable = True
def get_data(self, data_source):
with open(data_source, 'r') as contents:
# TODO (jathan): Can we somehow return a generator like the other
# _parse methods? Maybe using JSONDecoder?
data = json.load(contents)
return data
def load_data_source(self, data_source, **kwargs):
try:
return self.get_data(data_source)
except Exception as err:
raise LoaderFailed("Tried %r; and failed: %r" % (data_source, err))
class XMLLoader(BaseLoader):
"""
Wrapper for loading metadata via XML from the filesystem.
Parse 'netdevices.xml' and return a list of node 2-tuples (key, value).
These are as good as a dict without the extra dict() call.
"""
is_usable = True
def get_data(self, data_source):
# Parsing the complete file into a tree once and extracting out the
# device nodes is faster than using iterparse(). Curses!!
xml = ET.parse(data_source).findall('device')
# This is a generator within a generator. Trust me, it works in _populate()
data = (((e.tag, e.text) for e in node.getchildren()) for node in xml)
return data
def load_data_source(self, data_source, **kwargs):
try:
return self.get_data(data_source)
except Exception as err:
raise LoaderFailed("Tried %r; and failed: %r" % (data_source, err))
class RancidLoader(BaseLoader):
"""
Wrapper for loading metadata via RANCID from the filesystem.
Parse RANCID's ``router.db`` and return a generator of node 2-tuples (key,
value).
"""
is_usable = True
def get_data(self, data_source, recurse_subdirs=None):
data = rancid.parse_rancid_data(data_source,
recurse_subdirs=recurse_subdirs)
return data
def load_data_source(self, data_source, **kwargs):
# We want to make sure that we've set this variable
recurse_subdirs = kwargs.get('recurse_subdirs',
settings.RANCID_RECURSE_SUBDIRS)
try:
return self.get_data(data_source, recurse_subdirs)
except Exception as err:
raise LoaderFailed("Tried %r; and failed: %r" % (data_source, err))
class SQLiteLoader(BaseLoader):
"""
Wrapper for loading metadata via SQLite from the filesystem.
Parse 'netdevices.sql' and return a list of stuff.
"""
is_usable = True
def get_data(self, data_source, table_name='netdevices'):
connection = sqlite3.connect(data_source)
cursor = connection.cursor()
# Get the column names. This is a simple list strings.
colfetch = cursor.execute('pragma table_info(%s)' % table_name)
results = colfetch.fetchall()
columns = [r[1] for r in results]
# And the devices. This is a list of tuples whose values match the indexes
# of the column names.
devfetch = cursor.execute('select * from %s' % table_name)
devrows = devfetch.fetchall()
# Another generator within a generator, which structurally is a list of
# lists containing 2-tuples (key, value).
data = (itertools.izip(columns, row) for row in devrows)
return data
def load_data_source(self, data_source, **kwargs):
table_name = kwargs.get('table_name', 'netdevices')
try:
return self.get_data(data_source, table_name)
except Exception as err:
raise LoaderFailed("Tried %r; and failed: %r" % (data_source, err))
class CSVLoader(BaseLoader):
"""
Wrapper for loading metadata via CSV from the filesystem.
This leverages the functionality from the ``trigger.rancid`` library.
At the bare minimum your CSV file must be populated with 2-tuples of
"nodeName,manufacturer" (e.g. "test1-abc.net.aol.com,cisco"), separated by
newlines. The ``deviceType`` will default to whatever is specified in
:settings:`FALLBACK_TYPE` and ``deviceStatus`` will default to "up"
("PRODUCTION").
At max you may provide "nodeName,vendor,deviceStatus,deviceType" just like
what you'd expect from RANCID's ``router.db`` file format.
"""
is_usable = True
def get_data(self, data_source):
root_dir, filename = os.path.split(data_source)
data = rancid.parse_rancid_file(root_dir, filename, delimiter=',')
return data
def load_data_source(self, data_source, **kwargs):
try:
return self.get_data(data_source)
except Exception as err:
raise LoaderFailed("Tried %r; and failed: %r" % (data_source, err))
| Python | 0 | @@ -578,23 +578,8 @@
son%0A
-import sqlite3%0A
impo
@@ -611,16 +611,117 @@
as ET%0A%0A
+try:%0A import sqlite3%0A SQLITE_AVAILABLE = True%0Aexcept ImportError:%0A SQLITE_AVAILABLE = False%0A
%0Aclass J
@@ -3369,36 +3369,48 @@
is_usable =
-True
+SQLITE_AVAILABLE
%0A%0A def get_da
|
527ceabcdbded592c02ee2dce18a19ffce0248c2 | Remove unnecessary comment | trunk/bdp_fe/src/bdp_fe/jobconf/views.py | trunk/bdp_fe/src/bdp_fe/jobconf/views.py | """
Module bdp_fe.jobconf.views
"""
import logging
from django import forms
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from pymongo import Connection
import custom_model
from models import Job, JobModel
from views_util import safe_int_param
LOGGER = logging.getLogger(__name__)
@login_required
def list_jobs(request):
job_id = safe_int_param(request.GET, 'run_job')
if job_id:
run_job(request, job_id)
return render_to_response('job_listing.html', {
'title': 'Job listing',
'jobs': Job.objects.all(),
}, context_instance=RequestContext(request))
def retrieve_results(job_id):
ans = []
## TODO: make configurable
connection = Connection('localhost', 27017)
db = connection.test_database
job_results = db.test_collection
for job_result in job_results.find({"job_id" : job_id}):
ans.append(job_result)
return ans
@login_required
def view_results(request, job_id):
job_id = int(job_id) ## Django URL regexp enforces this
results = retrieve_results(job_id)
return render_to_response('job_results.html',
{ 'title' : 'Results of job %s' % job_id,
'job_results' : results },
context_instance=RequestContext(request))
def run_job(request, job_id):
try:
job = Job.objects.get(id=job_id)
if job.status != Job.CREATED:
msg = "Cannot start job in %s status" % job.get_status_display()
messages.warning(request, msg)
LOGGER.warning(msg)
return
job.status = Job.RUNNING
job.save()
# TODO: Unimplemented behaviour
LOGGER.warning("Unimplemented job start")
messages.info(request, "Job %s was started." % job_id)
except Job.DoesNotExist:
messages.warning(request, "Cannot start job %d: not found" % job_id)
LOGGER.warning("Job %d not found" % job_id)
class NewJobForm(forms.Form):
name = forms.CharField(max_length=40)
@login_required
def new_job(request):
if request.method == 'POST':
form = NewJobForm(request.POST)
if form.is_valid():
job = Job(name=form.cleaned_data['name'],
user=request.user,
status=Job.CREATED)
job.save()
return redirect(reverse('config_job', args=[job.id]))
else:
form = NewJobForm()
return render_to_response('new_job.html', {
'title': 'New job',
'form': form,
}, context_instance=RequestContext(request))
class UploadJarForm(forms.Form):
file = forms.FileField()
@login_required
def config_job(request, job_id):
job = get_object_or_404(Job, pk=job_id, user=request.user)
if request.method == 'POST':
form = UploadJarForm(request.POST, request.FILES)
if form.is_valid() and custom_model.handle_upload(job,
request.FILES['file']):
return redirect(reverse('upload_data', args=[job.id]))
else:
messages.info(request, 'JAR file upload failed')
else:
form = UploadJarForm()
return render_to_response('upload_jar.html', {
'title': 'Configure custom job',
'job_id' : job.id,
'form': form,
}, context_instance=RequestContext(request))
@login_required
def upload_data(request, job_id):
return HttpResponseNotFound()
| Python | 0 | @@ -1234,43 +1234,8 @@
_id)
- ## Django URL regexp enforces this
%0A
|
9371b1484e7843e479c5c54997d339d46cf4aedd | add logging | fastapp/plugins/__init__.py | fastapp/plugins/__init__.py | import os
import logging
logger = logging.getLogger(__name__)
class Singleton(type):
def __init__(cls, name, bases, dict):
super(Singleton, cls).__init__(name, bases, dict)
cls.instance = None
def __call__(cls,*args,**kw):
if cls.instance is None:
logger.info("Create singleton instance for %s" % cls)
cls.instance = super(Singleton, cls).__call__(*args, **kw)
else:
logger.info("Return singleton instance for %s" % cls)
return cls.instance
class PluginRegistry(object):
__metaclass__ = Singleton
def __init__(self):
self.plugins = []
def __iter__(self):
return iter(self.plugins)
def add(self, cls):
if cls not in self.plugins:
logger.info("Register: %s" % cls)
cls.init()
self.plugins.append(cls)
else:
logger.debug("Already registered: %s" % cls)
def get(self):
return self.plugins
def register_plugin(cls):
"""Class decorator for adding plugins to the registry"""
PluginRegistry().add(cls())
return cls
def call_plugin_func(obj, func):
r_success = {}
r_failed = {}
registry = PluginRegistry()
for plugin in registry.get():
logger.info("Handling plugin %s for %s" % (plugin, func))
try:
plugin_func = getattr(plugin, func)
r = plugin_func(obj)
r_success[plugin.name] = r
except Exception, e:
logger.exception(e)
r_failed[plugin.name] = e
return r_success, r_failed
class Plugin(object):
__metaclass__ = Singleton
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
super(Plugin, self ).__init__()
@property
def name(self):
return self.__class__.__name__
def attach_worker(self, **kwargs):
pass
def config_for_workers(self, base):
# send dictionary with config to workers for the plugin
# the dictionary is available in self.config(base)
config = {}
config.update(self.config(base))
logger.info("Config to worker for plugin %s" % self.name)
return config
@property
def shortname(self):
return self.__class__.__module__.split(".")[-1]
def init(self):
pass
def on_create_user(self, user):
pass
def on_create_base(self, base):
pass
def on_delete_base(self, base):
pass
def on_start_base(self, base):
pass
def on_stop_base(self, base):
pass
def on_restart_base(self, base):
pass
def on_destroy_base(self, base):
pass
def cockpit_context(self):
return {}
def executor_context(self, executor):
return {}
def executor_context_kv(self, executor):
context = self.executor_context(self, executor)
new_context = []
for k, v in context.items():
new_context.append({
'key': k,
'value': k,
})
return new_context
| Python | 0.000001 | @@ -1124,24 +1124,33 @@
in %25s for %25s
+ starting
%22 %25 (plugin,
@@ -1332,16 +1332,171 @@
me%5D = e%0A
+%09%09logger.info(%22Handling plugin %25s for %25s ended%22 %25 (plugin, func))%0A%09logger.info(%22Loaded %25s with success, %25s with errors%22 %25 (len(r_success), len(r_failed)))%0A
%09return
|
279bebc53c2f589db943c91f31240a38ad059d72 | optimize username loginfield for mobile devices | features/gestalten/forms.py | features/gestalten/forms.py | import allauth
from crispy_forms import bootstrap, layout
import django
from django import forms
from django.contrib.auth import models as auth_models
from django.contrib.sites import models as sites_models
from features.groups import models as groups
from utils import forms as utils_forms
from features.gestalten import models
def validate_slug(slug):
if slug in django.conf.settings.ENTITY_SLUG_BLACKLIST:
raise django.core.exceptions.ValidationError(
'Die Adresse \'%(slug)s\' ist reserviert und darf nicht verwendet werden.',
params={'slug': slug}, code='reserved')
if groups.Group.objects.filter(slug=slug).exists():
raise django.core.exceptions.ValidationError(
'Die Adresse \'%(slug)s\' ist bereits vergeben.',
params={'slug': slug}, code='in-use')
class User(utils_forms.FormMixin, forms.ModelForm):
class Meta:
fields = ('first_name', 'last_name', 'username')
labels = {'username': 'Adresse der Benutzerseite / Pseudonym'}
model = auth_models.User
def clean_username(self):
slug = self.cleaned_data['username']
validate_slug(slug)
return slug
class Gestalt(utils_forms.ExtraFormMixin, forms.ModelForm):
extra_form_class = User
class Meta:
fields = ('about', 'public')
model = models.Gestalt
def get_instance(self):
return self.instance.user
def get_layout(self):
DOMAIN = sites_models.Site.objects.get_current().domain
return (
bootstrap.PrependedText(
'username',
'%(domain)s/' % {'domain': DOMAIN}),
'first_name',
'last_name',
layout.Field('about', rows=5),
'public',
utils_forms.Submit('Profil ändern'),
)
class Login(allauth.account.forms.LoginForm):
password = forms.CharField(label='Kennwort', widget=forms.PasswordInput())
remember = forms.BooleanField(label='Anmeldung merken', required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['login'] = forms.CharField(
label='E-Mail-Adresse oder Pseudonym',
widget=forms.TextInput(attrs={'autofocus': 'autofocus'}))
| Python | 0 | @@ -2311,16 +2311,37 @@
(attrs=%7B
+%0A
'autofoc
@@ -2356,12 +2356,208 @@
tofocus'
+,%0A 'autocomplete': 'username',%0A 'autocorrect': 'off',%0A 'autocapitalize': 'none',%0A 'spellcheck': 'false'%0A
%7D))%0A
|
64804965e031f365937ef8fe70dc749c4532053d | fix abstract scraper, can't use lxml's url parsing because we need a custom user agent | tx_highered/scripts/initial_wikipedia.py | tx_highered/scripts/initial_wikipedia.py | #! /usr/bin/env python
import datetime
import requests
from lxml.html import parse, tostring
from tx_highered.models import Institution
def get_wiki_title(name):
endpoint = "http://en.wikipedia.org/w/api.php"
params = dict(action="opensearch",
search=name,
limit=1,
namespace=0,
format="json",)
r = requests.get(endpoint, params=params)
try:
_, results = r.json
title = results[0]
except IndexError:
return None
return title
def get_wiki_abstract(url):
doc = parse(url) # won't handle https
root = doc.getroot()
toc = root.get_element_by_id('toc')
abstract = []
for elem in toc.getparent().iterchildren():
if elem == toc:
break
if elem.tag == 'p':
elem.make_links_absolute()
abstract.append(tostring(elem))
return "\n".join(abstract).strip()
def main():
queryset = Institution.objects.filter(institution_type='uni')
qs = queryset.filter(wikipedia_title__isnull=True)
for inst in qs:
title = get_wiki_title(inst.name)
if title:
inst.wikipedia_title = title
inst.save()
print inst.name + " -> " + title
qs = queryset.filter(wikipedia_title__isnull=False, wikipedia_scraped=None)
for inst in qs:
text = get_wiki_abstract(inst.wikipedia_url)
if text:
inst.wikipedia_abstract = text
inst.wikipedia_scraped = datetime.datetime.now()
inst.save()
print inst
if __name__ == "__main__":
main()
| Python | 0.000001 | @@ -20,23 +20,112 @@
hon%0A
-import datetime
+try:%0A from django.utils.timezone import now%0Aexcept ImportError:%0A from datetime.datetime import now
%0A%0Aim
@@ -160,21 +160,35 @@
import
-parse
+document_fromstring
, tostri
@@ -643,111 +643,226 @@
-doc = parse(url) # won't handle https
+r = requests.get(url, headers=%7B'User-Agent': 'thedp-scraper/0.1alpha'%7D)
%0A
-root
+doc
= doc
-.getroot()%0A toc = root.get_element_by_id('toc')
+ument_fromstring(r.text)%0A root = doc%0A try:%0A toc = root.get_element_by_id('toc')%0A except KeyError:%0A return None
%0A
@@ -1031,16 +1031,19 @@
bsolute(
+url
)%0A
@@ -1699,26 +1699,8 @@
d =
-datetime.datetime.
now(
|
624577c1a783ce7f06019ec6d96774f5c1b76432 | fix anonymous bug | wechat-forwarding.py | wechat-forwarding.py | #!/usr/bin/env python3
# -*-encoding:utf-8-*-
import os, json, requests, html
from xml.etree import ElementTree as ETree
import itchat
from itchat.content import *
sending_type = {'Picture': 'img', 'Video': 'vid'}
data_path = 'data'
nickname = ''
as_chat_bot = True
bot = None
config = {}
if __name__ == '__main__':
with open('config.json', 'r') as fin:
config = json.loads(fin.read())
if not os.path.exists(data_path):
os.mkdir(data_path)
# if the QR code doesn't show correctly, you can try to change the value
# of enableCdmQR to 1 or -1 or -2. It nothing works, you can change it to
# enableCmdQR=True and a picture will show up.
bot = itchat.new_instance()
bot.auto_login(hotReload=True, enableCmdQR=2)
nickname = bot.loginInfo['User']['NickName']
# tuling chat bot
def talks_robot(info):
api_url = 'http://www.tuling123.com/openapi/api'
apikey = ''
data = {'key': apikey, 'info': info.lower()}
try:
req = requests.post(api_url, data=data, timeout=5).text
txt = json.loads(req)['text']
if txt.find(u'不知道') >= 0:
return
if txt.find(u'不会') >= 0:
return
if txt.find(u'抱歉') >= 0:
return
return txt
except:
pass
return None
def get_sender_receiver(msg):
sender = nickname
receiver = nickname
if msg['FromUserName'][0:2] == '@@': # group chat
sender = msg['ActualNickName']
m = bot.search_chatrooms(userName=msg['FromUserName'])
if m is not None:
receiver = m['NickName']
elif msg['ToUserName'][0:2] == '@@': # group chat by myself
if 'ActualNickName' in msg:
sender = msg['ActualNickName']
else:
m = bot.search_friends(userName=msg['FromUserName'])
if m is not None:
sender = m['NickName']
m = bot.search_chatrooms(userName=msg['ToUserName'])
if m is not None:
receiver = m['NickName']
else: # personal chat
m = bot.search_friends(userName=msg['FromUserName'])
if m is not None:
sender = m['NickName']
m = bot.search_friends(userName=msg['ToUserName'])
if m is not None:
receiver = m['NickName']
return html.unescape(sender), html.unescape(receiver)
def print_msg(msg):
print(' '.join(msg))
def get_whole_msg(msg, prefix, sender, download=True):
if len(msg['FileName']) > 0 and len(msg['Url']) == 0:
if download: # download the file into data_path directory
fn = os.path.join(data_path, msg['FileName'])
msg['Text'](fn)
if os.path.getsize(fn) == 0:
return []
c = '@%s@%s' % (sending_type.get(msg['Type'], 'fil'), fn)
else:
c = '@%s@%s' % (sending_type.get(msg['Type'], 'fil'), msg['FileName'])
return ['%s[%s]:' % (prefix, sender), c]
c = msg['Text']
if len(msg['Url']) > 0:
if len(msg['OriContent']) > 0:
try: # handle map label
content_tree = ETree.fromstring(msg['OriContent'])
if content_tree is not None:
map_label = content_tree.find('location')
if map_label is not None:
c += ' ' + map_label.attrib['poiname']
c += ' ' + map_label.attrib['label']
except:
pass
url = html.unescape(msg['Url'])
c += ' ' + url
# if a message starts with '//', send as anonymous
if c.startswith == '//':
sender = u'匿名'
c = c[2:].strip()
return ['%s[%s]: %s' % (prefix, sender, c)]
@bot.msg_register([TEXT], isFriendChat=True, isGroupChat=False)
def personal_msg(msg):
global as_chat_bot
text = msg['Text'].strip()
if text == u'闭嘴':
as_chat_bot = False
if text == u'张嘴吃药':
as_chat_bot = True
return talks_robot(text)
@bot.msg_register([FRIENDS])
def accept_friend(msg):
bot.add_friend(msg['RecommendInfo']['UserName'], 3)
@bot.msg_register([TEXT, PICTURE, MAP, SHARING, RECORDING, ATTACHMENT, VIDEO], isFriendChat=False, isGroupChat=True)
def group_msg(msg):
# chat bot functionality
global as_chat_bot
if 'IsAt' in msg and msg['IsAt'] == True and msg['Type'] == 'Text' and \
msg['ToUserName'][0:2] != '@@' and msg['Text'].find(u'@' + nickname) >= 0:
text = msg['Text'].replace(u'@' + nickname, '').strip()
if text == u'闭嘴':
as_chat_bot = False
return
if as_chat_bot:
info = talks_robot(text)
return info
return
# forwarding functionality
group = msg['FromUserName']
if msg['ToUserName'][0:2] == '@@': # message sent by myself
group = msg['ToUserName']
sender, receiver = get_sender_receiver(msg)
if sender == '':
sender = nickname
# check if the message is in the config
if receiver not in config: # if not in the config, do nothing
return
# process message and send it to all the subscribers
prefix = config[receiver]['prefix']
msg_send = get_whole_msg(msg, prefix, sender)
if msg_send is None or len(msg_send) == 0:
return
print_msg(msg_send)
for tosend in config[receiver]['sub']:
room = bot.search_chatrooms(name=tosend)
for r in room:
if r['UserName'] == group: # don't send back to the source
continue
if r['NickName'] != tosend: # check group name exact match
continue
for m in msg_send: # iterate messages (for images, videos, and files)
bot.send(m, toUserName=r['UserName'])
if __name__ == '__main__':
bot.run()
| Python | 0.000002 | @@ -3569,16 +3569,14 @@
with
- ==
+(
'//'
+)
:%0A
|
e8fb8c1d7dda19c82f45d6b8c3883d5d4314a5ad | Fix encoding issue with Office Module | modules/Office365Brute.py | modules/Office365Brute.py | #! /bin/python
# Created by Adam Compton (tatanus)
# Part of myBFF
from core.webModule import webModule
import base64
from lxml import etree
import re
import random
import time
import requests
from requests import session
class Office365Brute(webModule):
def __init__(self, config, display, lock):
super(Office365Brute, self).__init__(config, display, lock)
self.fingerprint="[o, O]utlook"
self.response="Success"
term = ['credential', 'account', 'password', 'login', 'confidential']
def somethingCool(self, term, data, config):
# Parse the result xml
root = etree.fromstring(data)
xpathStr = "/s:Envelope/s:Body/m:FindItemResponse/m:ResponseMessages/m:FindItemResponseMessage/m:RootFolder/t" \
":Items/t:Message"
namespaces = {
's': 'http://schemas.xmlsoap.org/soap/envelope/',
't': 'http://schemas.microsoft.com/exchange/services/2006/types',
'm': 'http://schemas.microsoft.com/exchange/services/2006/messages',
}
contacts = []
# Print Mail properties
print("[+] Searching for sensitive emails...")
elements = root.xpath(xpathStr, namespaces=namespaces)
for element in elements:
try:
subject = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}Subject').text
fromname = element.find(
'{http://schemas.microsoft.com/exchange/services/2006/types}From/{'
'http://schemas.microsoft.com/exchange/services/2006/types}Mailbox/{'
'http://schemas.microsoft.com/exchange/services/2006/types}Name').text
# fromemail = element.find(
# '{http://schemas.microsoft.com/exchange/services/2006/types}From/{'
# 'http://schemas.microsoft.com/exchange/services/2006/types}Mailbox/{'
# 'http://schemas.microsoft.com/exchange/services/2006/types}EmailAddress').text
itemid = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}ItemId').attrib['Id']
changekey = element.find('{http://schemas.microsoft.com/exchange/services/2006/types}ItemId').attrib['ChangeKey']
contacts.append(fromname.encode('ascii', 'ignore'))# + " (" + fromemail.encode('ascii', 'ignore') + ")")
for search_term in term:
if re.search(search_term, subject, re.IGNORECASE):
print "[+] This could be interesting: "
print "[+] * Subject : " + subject.encode('ascii', 'ignore')
print "[+] * From : " + fromname.encode('ascii', 'ignore')# + " (" + fromemail.encode('ascii', 'ignore') + ")"
except:
pass
print("[+] Any contacts found will be saved to tmp/contacts-" + config["USERNAME"] + "...")
try:
for contact in sorted(set(contacts)):
#print("[+] Contact Name: " + contact)
f = open('./tmp/contacts-' + config["USERNAME"] + '.txt', 'a')
f.write(contact + '\n')
f.close()
except:
print("[-] No contacts found in mailbox.")
def connectTest(self, config, payload, auth):
if config["domain"]:
user = config["domain"] + '\\' + config["USERNAME"]
else:
user = config["USERNAME"]
if 'https' in config["HOST"]:
host = config["HOST"].strip('https://')
else:
host = config["HOST"].strip('http://')
if '/' in host:
host = re.sub(r'/s\w+', '', host)
with session() as c:
c.headers.update({"Host": host,
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": len(payload),
"Authorization": "Basic %s" % auth})
resp1 = c.post(config["HOST"] + "/ews/Exchange.asmx", data=payload, allow_redirects=True, verify=False)#, proxies=proxySvrs)
if "200" in str(resp1):
print("[+] User Credentials Successful: " + user + ":" + config["PASSWORD"])
if not config["dry_run"]:
print("[!] Time to do something cool!")
data = str(resp1.text)
self.somethingCool(self.term, data, config)
else:
print("[-] Login Failed for: " + config["USERNAME"] + ":" + config["PASSWORD"])
def payload(self, config):
payload = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<soap:Body>
<FindItem xmlns="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
Traversal="Shallow">
<ItemShape>
<t:BaseShape>Default</t:BaseShape>
</ItemShape>
<ParentFolderIds>
<t:DistinguishedFolderId Id="inbox"/>
</ParentFolderIds>
</FindItem>
</soap:Body>
</soap:Envelope>""".format()
if config["PASS_FILE"]:
pass_lines = [pass_line.rstrip('\n') for pass_line in open(config["PASS_FILE"])]
for pass_line in pass_lines:
if config["UserFile"]:
lines = [line.rstrip('\n') for line in open(config["UserFile"])]
for line in lines:
config["USERNAME"] = line.strip('\n')
config["PASSWORD"] = pass_line.strip('\n')
auth = base64.encodestring("%s:%s" % (config["USERNAME"], config["PASSWORD"])).replace('\n', '')
self.connectTest(config, payload, auth)
else:
config["PASSWORD"] = pass_line.strip('\n')
auth = base64.encodestring("%s:%s" % (config["USERNAME"], config["PASSWORD"])).replace('\n', '')
self.connectTest(config, payload, auth)
time.sleep(config["timeout"])
elif config["UserFile"]:
lines = [line.rstrip('\n') for line in open(config["UserFile"])]
for line in lines:
config["USERNAME"] = line.strip('\n')
auth = base64.encodestring("%s:%s" % (config["USERNAME"], config["PASSWORD"])).replace('\n', '')
self.connectTest(config, payload, auth)
else:
auth = base64.encodestring("%s:%s" % (config["USERNAME"], config["PASSWORD"])).replace('\n', '')
self.connectTest(config, payload, auth)
| Python | 0 | @@ -215,16 +215,28 @@
session%0A
+import sys%0A%0A
%0A%0Aclass
@@ -1461,37 +1461,16 @@
nt.find(
-%0A
'%7Bhttp:/
@@ -4301,16 +4301,100 @@
cool!%22)%0A
+ resp2 = resp1.text.encode('raw_unicode_escape').decode('ascii')%0A
@@ -4420,22 +4420,17 @@
str(resp
-1.text
+2
)%0A
|
db3f71f537a85396d777ba28d3ad6c8156137c24 | Change pg key | src/python/pagerduty.py | src/python/pagerduty.py | import json
import urllib2
PD_URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
TIMEOUT = 10
def request(action, json_str):
obj = json.loads(json_str)
description = "%s %s is %s ( %s )" % (
obj.get('host', 'unknown host'),
obj.get('service', 'unknown service'),
obj.get('state', 'unknown state'),
obj.get('metric', 'nil'))
pg_key = obj.pop('pg_key')
event = {
'service_key': pg_key,
'event_type': action,
'incident_key': "%s %s" % (obj['host'], obj['service']),
'description': description,
'details': json.dumps(obj)
}
try:
result = json.loads(
urllib2.urlopen(PD_URL, json.dumps(event), TIMEOUT).read())
print result
except Exception, e:
print str(e)
return False
return result['status'] == 'success'
def trigger(json_str):
return request('trigger', json_str)
def acknowledge(json_str):
return request('acknowledge', json_str)
def resolve(json_str):
return request('resolve', json_str)
args = {
'pg_key': '113852fbf4d34663b87b7321e9eba1e1',
'description': 'this is a test',
'host': 'foobar.com',
'service': 'whatever'
}
#trigger(json.dumps(args))
#resolve(json.dumps(args))
| Python | 0.000001 | @@ -1121,40 +1121,13 @@
': '
-113852fbf4d34663b87b7321e9eba1e1
+fixme
',%0A
|
8c959354b59fb25f63ca73ecdcbd0f59197cabc9 | Add --random option. | scanless/cli/main.py | scanless/cli/main.py | #!/usr/bin/env python
#
# scanless - public port scan scrapper
# https://github.com/vesche/scanless
#
import argparse
import sys
from scanless.scanners import *
SCAN_LIST = '''Scanner Name | Website
---------------|------------------------------
yougetsignal | http://www.yougetsignal.com
viewdns | http://viewdns.info
hackertarget | https://hackertarget.com
ipfingerprints | http://www.ipfingerprints.com
pingeu | http://ping.eu
spiderip | https://spiderip.com
portcheckers | http://www.portcheckers.com
t1shopper | http://www.t1shopper.com
'''
SCANNERS = { 'yougetsignal': yougetsignal,
'viewdns': viewdns,
'hackertarget': hackertarget,
'ipfingerprints': ipfingerprints,
'pingeu': pingeu,
'spiderip': spiderip,
'portcheckers': portcheckers,
't1shopper': t1shopper }
def scanless(target, scanner):
def run(s):
try:
return SCANNERS[s].scan(target)
except:
return 'Error, {} was unable to run.'.format(s)
print('Running scanless...')
if scanner == 'all':
for s, _ in SCANNERS.items():
print(run(s))
elif scanner in SCANNERS:
print(run(scanner))
else:
print('Scanner not found, see --list to view all supported scanners.')
def get_parser():
parser = argparse.ArgumentParser(description='scanless, public port scan scrapper')
parser.add_argument('-t', '--target', help='ip or domain to scan',
type=str)
parser.add_argument('-s', '--scanner', help='scanner to use (default: yougetsignal)',
type=str, default='yougetsignal')
parser.add_argument('-l', '--list', help='list scanners',
action='store_true')
parser.add_argument('-a', '--all', help='use all the scanners',
action='store_true')
return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
if args['list']:
print(SCAN_LIST)
return
if not args['target']:
parser.print_help()
return
target = args['target']
scanner = args['scanner'].lower()
if args['all']:
scanner = 'all'
scanless(target, scanner)
if __name__ == '__main__':
main()
| Python | 0.000001 | @@ -123,16 +123,42 @@
port sys
+%0Afrom random import choice
%0A%0Afrom s
@@ -1779,16 +1779,132 @@
ignal')%0A
+ parser.add_argument('-r', '--random', help='use a random scanner',%0A action='store_true')%0A
pars
@@ -2411,16 +2411,99 @@
ower()%0A%0A
+ if args%5B'random'%5D:%0A scanner = choice(%5Bs for s, _ in SCANNERS.items()%5D)%0A%0A
if a
|
abce4c9958d55e9d46e06c81b670a3fcc56f47ab | Rename render to wptexturize, delegate render to wptexturize. We will add wpautop soon. | wpmarkup/__init__.py | wpmarkup/__init__.py | #!/usr/bin/env python
import re
"""
A simple WordPress markup.
"""
class Markup:
# TODO: add the l10n support
# translators: opening tokeny quote
opening_quote = '“'
#translators: closing tokeny quote
closing_quote = '”'
cockney = [ "'tain't","'twere","'twas","'tis","'twill","'til","'bout","'nuff","'round","'cause"]
cockneyreplace = [ "’tain’t","’twere","’twas","’tis","’twill","’til","’bout","’nuff","’round", "’cause" ]
static_characters = [ '---', ' -- ', '--', ' - ', 'xn–', '...', '``', '\'s', '\'\'', ' (tm)' ]
static_replacements = [ '—', ' — ', '–', ' – ', 'xn--', '…', opening_quote, '’s', closing_quote, ' ™']
static_dict = dict(zip(static_characters+cockney, static_replacements+cockneyreplace))
static_regex = re.compile("(%s)" % "|".join(map(re.escape, static_dict.keys())))
dynamic_characters = [ "'(\d\d(?:’|')?s)", '(\s|\A|")\'', '(\d+)"', "(\d+)'", "(\S)'([^'\s])", '(\s|\A)"(?!\s)', '"(\s|\S|\Z)', "'([\s.]|\Z)", '(\d+)x(\d+)', "(<.*>|[.*])" ]
dynamic_replacements = [ r'’\1',r'\1‘', r'\1″', r'\1′', r'\1’\2', r'\1%s' % opening_quote , r'%s\1' % closing_quote, r'’\1', r'\1×\2', r'&\1' ]
dynamic_regex = zip([ re.compile(x, re.DOTALL) for x in dynamic_characters ], dynamic_replacements)
no_texturize_tags = ['pre', 'code', 'kbd', 'style', 'script', 'tt']
no_texturize_shortcodes = ['code']
token_regex = re.compile('(<.*?>|\[.*?\])', re.DOTALL)
@staticmethod
def render(raw):
"""
>>> Markup.render(''''cause today's effort makes it worth tomorrow's "holiday"...''')
'’cause today’s effort makes it worth tomorrow’s “holiday”…'
>>> Markup.render('<pre>sadsadasd</code>"baba"</pre>')
'<pre>sadsadasd</code>"baba"</pre>'
"""
no_texturize_tags_stack = []
no_texturize_shortcodes_stack = []
output = []
for token in Markup.token_regex.split(raw) :
if len(token) and '<' != token[0] and '[' != token[0] \
and len(no_texturize_shortcodes_stack) == 0 \
and len(no_texturize_tags_stack) == 0: #If it's not a tag
token = Markup.static_regex.sub(lambda mo: Markup.static_dict[mo.string[mo.start():mo.end()]], token)
for regex, repl in Markup.dynamic_regex:
token = regex.sub(repl, token)
else:
Markup.pushpop_element(token, no_texturize_tags_stack, Markup.no_texturize_tags, '<', '>');
Markup.pushpop_element(token, no_texturize_shortcodes_stack, Markup.no_texturize_shortcodes, '[', ']');
output.append(token)
return "".join(output)
@staticmethod
def pushpop_element(text, stack, disabled_elements, opening = '<', closing = '>'):
o = re.escape(opening)
c = re.escape(closing)
for element in disabled_elements:
if re.match(r'^%s%s\b' % (o, element), text):
stack.append(element)
break
if re.match(r'^%s/%s%s' % (o, element, c), text):
if len(stack):
last = stack.pop()
# disable texturize until we find a closing tag of our type (e.g. <pre>)
# even if there was invalid nesting before that
# Example: in the case <pre>sadsadasd</code>"baba"</pre> "baba" won't be texturized
if last != element: stack.append(last)
break
if __name__ == "__main__":
import doctest
doctest.testmod()
| Python | 0 | @@ -1989,32 +1989,117 @@
e%3E'%0A %22%22%22%0A
+ return Markup.wptexturize(raw)%0A%0A%0A @staticmethod%0A def wptexturize(raw):%0A
no_textu
|
3999e9812a766066dcccf6a4d07174144cb9f72d | Add Minecraft Wiki link to version item | wurstmineberg.45s.py | wurstmineberg.45s.py | #!/usr/local/bin/python3
import requests
people = requests.get('https://api.wurstmineberg.de/v2/people.json').json()
status = requests.get('https://api.wurstmineberg.de/v2/world/wurstmineberg/status.json').json()
print(len(status['list']))
print('---')
print('Version: {}|color=gray'.format(status['version']))
for wmb_id in status['list']:
display_name = people['people'].get(wmb_id, {}).get('name', wmb_id)
if people['people'].get(wmb_id, False) and people['people'][wmb_id].get('slack', False):
slack_name = people['people'][wmb_id]['slack']['username']
slack_url = 'https://wurstmineberg.slack.com/messages/@' + slack_name
else:
slack_url = None
print('{}|href=https://wurstmineberg.de/people/{} color=#2889be'.format(display_name, wmb_id))
if slack_url is not None:
print('@{}|alternate=true href={} color=red'.format(slack_name, slack_url))
print('---')
print('Start Minecraft | bash=/usr/bin/open param1=-a param2=Minecraft terminal=false')
| Python | 0 | @@ -267,18 +267,63 @@
rsion: %7B
-%7D%7C
+ver%7D%7Chref=http://minecraft.gamepedia.com/%7Bver%7D
color=gr
@@ -333,16 +333,20 @@
.format(
+ver=
status%5B'
|
4b2627ea786031ea7b1e622fc5cb665a017b3f63 | fix InlineFieldList for WTForms v3 | flask_admin/model/fields.py | flask_admin/model/fields.py | import itertools
from wtforms.validators import ValidationError
from wtforms.fields import FieldList, FormField, SelectFieldBase
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin._compat import iteritems
from .widgets import (InlineFieldListWidget, InlineFormWidget,
AjaxSelect2Widget)
class InlineFieldList(FieldList):
widget = InlineFieldListWidget()
def __init__(self, *args, **kwargs):
super(InlineFieldList, self).__init__(*args, **kwargs)
def __call__(self, **kwargs):
# Create template
meta = getattr(self, 'meta', None)
if meta:
template = self.unbound_field.bind(form=None, name='', _meta=meta)
else:
template = self.unbound_field.bind(form=None, name='')
# Small hack to remove separator from FormField
if isinstance(template, FormField):
template.separator = ''
template.process(None)
return self.widget(self,
template=template,
check=self.display_row_controls,
**kwargs)
def display_row_controls(self, field):
return True
def process(self, formdata, data=unset_value):
res = super(InlineFieldList, self).process(formdata, data)
# Postprocess - contribute flag
if formdata:
for f in self.entries:
key = 'del-%s' % f.id
f._should_delete = key in formdata
return res
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not self.should_delete(subfield) and not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def should_delete(self, field):
return getattr(field, '_should_delete', False)
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in zip(self.entries, candidates):
if not self.should_delete(field):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
class InlineFormField(FormField):
"""
Inline version of the ``FormField`` widget.
"""
widget = InlineFormWidget()
class InlineModelFormField(FormField):
"""
Customized ``FormField``.
Excludes model primary key from the `populate_obj` and
handles `should_delete` flag.
"""
widget = InlineFormWidget()
def __init__(self, form_class, pk, form_opts=None, **kwargs):
super(InlineModelFormField, self).__init__(form_class, **kwargs)
self._pk = pk
self.form_opts = form_opts
def get_pk(self):
if isinstance(self._pk, (tuple, list)):
return tuple(getattr(self.form, pk).data for pk in self._pk)
return getattr(self.form, self._pk).data
def populate_obj(self, obj, name):
for name, field in iteritems(self.form._fields):
if name != self._pk:
field.populate_obj(obj, name)
class AjaxSelectField(SelectFieldBase):
"""
Ajax Model Select Field
"""
widget = AjaxSelect2Widget()
separator = ','
def __init__(self, loader, label=None, validators=None, allow_blank=False, blank_text=u'', **kwargs):
super(AjaxSelectField, self).__init__(label, validators, **kwargs)
self.loader = loader
self.allow_blank = allow_blank
self.blank_text = blank_text
def _get_data(self):
if self._formdata:
model = self.loader.get_one(self._formdata)
if model is not None:
self._set_data(model)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _format_item(self, item):
value = self.loader.format(self.data)
return (value[0], value[1], True)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank and self.data is None:
raise ValidationError(self.gettext(u'Not a valid choice'))
class AjaxSelectMultipleField(AjaxSelectField):
"""
Ajax-enabled model multi-select field.
"""
widget = AjaxSelect2Widget(multiple=True)
def __init__(self, loader, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(AjaxSelectMultipleField, self).__init__(loader, label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata:
data = []
# TODO: Optimize?
for item in formdata:
model = self.loader.get_one(item) if item else None
if model:
data.append(model)
else:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def process_formdata(self, valuelist):
self._formdata = set()
for field in valuelist:
for n in field.split(self.separator):
self._formdata.add(n)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
| Python | 0 | @@ -1314,16 +1314,36 @@
et_value
+, extra_filters=None
):%0A
@@ -1388,16 +1388,29 @@
process(
+%0A
formdata
@@ -1411,24 +1411,53 @@
rmdata, data
+, extra_filters=extra_filters
)%0A%0A #
|
eb97762538126cb4a451ed23a025490360d01bf1 | fixed on supress_exc=True | flask_vgavro_utils/utils.py | flask_vgavro_utils/utils.py | import subprocess
import logging
from datetime import datetime
from werkzeug.local import LocalProxy
from werkzeug.utils import import_string
from flask import g
from marshmallow.utils import UTC
class classproperty(property):
"""
A decorator that behaves like @property except that it operates
on classes rather than instances.
Copy of sqlalchemy.util.langhelpers.classproperty, because first one executed
on class declaration.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
@classproperty
def NotImplementedProperty(self):
raise NotImplementedError()
NotImplementedClassProperty = NotImplementedProperty
class EntityLoggerAdapter(logging.LoggerAdapter):
"""
Adds info about the entity to the logged messages.
"""
def __init__(self, logger, entity):
self.logger = logger
self.entity = entity or '?'
def process(self, msg, kwargs):
return '[{}] {}'.format(self.entity, msg), kwargs
def _resolve_obj_key(obj, key, supress_exc):
if key.isdigit():
try:
return obj[int(key)]
except:
try:
return obj[key]
except Exception as exc:
if supress_exc:
return exc
raise ValueError('Could not resolve "{}" on {} object: {}'.format(key, obj))
else:
try:
return obj[key]
except:
try:
return getattr(obj, key)
except Exception as exc:
if supress_exc:
return exc
raise ValueError('Could not resolve "{}" on {} object'.format(key, obj))
def resolve_obj_path(obj, path, suppress_exc=False):
dot_pos = path.find('.')
if dot_pos == -1:
return _resolve_obj_key(obj, path, suppress_exc)
else:
key, path = path[:dot_pos], path[(dot_pos + 1):]
return resolve_obj_path(_resolve_obj_key(obj, key, suppress_exc),
path, suppress_exc)
class AttrDict(dict):
__getattr__ = dict.__getitem__
def __dir__(self):
# autocompletion for ipython
return super().__dir__() + list(self.keys())
def maybe_attr_dict(data):
if isinstance(data, dict):
return AttrDict({k: maybe_attr_dict(v) for k, v in data.items()})
return data
def hstore_dict(value):
return {k: str(v) for k, v in value.items()}
def maybe_encode(string, encoding='utf-8'):
return isinstance(string, bytes) and string or str(string).encode(encoding)
def maybe_decode(string, encoding='utf-8'):
return isinstance(string, str) and string.decode(encoding) or string
def maybe_import(value):
return isinstance(value, str) and import_string(value) or value
def datetime_from_utc_timestamp(timestamp):
return datetime.utcfromtimestamp(float(timestamp)).replace(tzinfo=UTC)
def utcnow():
return datetime.now(tz=UTC)
def is_instance_or_proxied(obj, cls):
if isinstance(obj, LocalProxy):
obj = obj._get_current_object()
return isinstance(obj, cls)
def local_proxy_on_g(attr, callback):
def wrapper():
if g:
if not hasattr(g, attr):
setattr(g, attr, callback())
return getattr(g, attr)
return LocalProxy(wrapper)
def get_git_repository_info(path='./'):
if not hasattr(get_git_repository_info, '_info'):
get_git_repository_info._info = {}
info = get_git_repository_info._info
if path not in info:
try:
pipe = subprocess.Popen(['git', 'log', '-1', '--pretty=format:"%h|%ce|%cd"'],
stdout=subprocess.PIPE, cwd=path)
out, err = pipe.communicate()
info[path] = dict(zip(('rev', 'email', 'time'), out.split('|')))
except:
# do not retry on first fail
info[path] = {}
# raise
return info[path]
class ReprMixin:
def __repr__(self):
dict_ = hasattr(self, 'to_dict') and self.to_dict() or self.__dict__
items = dict_.items()
items_str = ', '.join((u'{}={}'.format(k, v) for k, v in items))
return '<{}({})>'.format(self.__class__.__name__, items_str)
| Python | 0.998596 | @@ -1152,21 +1152,8 @@
key
-, supress_exc
):%0A
@@ -1325,71 +1325,8 @@
xc:%0A
- if supress_exc:%0A return exc%0A
@@ -1393,18 +1393,54 @@
bject: %7B
-%7D'
+!r%7D'%0A
.format(
@@ -1447,16 +1447,21 @@
key, obj
+, exc
))%0A e
@@ -1621,71 +1621,8 @@
xc:%0A
- if supress_exc:%0A return exc%0A
@@ -1686,17 +1686,57 @@
%7D object
-'
+: %7B!r%7D'%0A
.format(
@@ -1743,16 +1743,21 @@
key, obj
+, exc
))%0A%0A%0Adef
@@ -1780,33 +1780,32 @@
h(obj, path, sup
-p
ress_exc=False):
@@ -1801,24 +1801,37 @@
exc=False):%0A
+ try:%0A
dot_pos
@@ -1847,16 +1847,20 @@
nd('.')%0A
+
if d
@@ -1877,24 +1877,28 @@
-1:%0A
+
return _reso
@@ -1918,32 +1918,22 @@
bj, path
-, suppress_exc)%0A
+)%0A
else
@@ -1942,16 +1942,20 @@
+
key, pat
@@ -1995,16 +1995,20 @@
+ 1):%5D%0A
+
@@ -2060,24 +2060,45 @@
key
+)
,
-suppress_exc),
+path)%0A except Exception as exc:
%0A
@@ -2106,51 +2106,60 @@
- path, suppress_exc)
+if supress_exc:%0A return exc%0A raise
%0A%0A%0Ac
|
d792201bc311a15e5df48259008331b771c59aca | Fix CSS problem when Flexx is embedded in page-app | flexx/ui/layouts/_layout.py | flexx/ui/layouts/_layout.py | """ Layout widgets
"""
from . import Widget
class Layout(Widget):
""" Abstract class for widgets that organize their child widgets.
Panel widgets are layouts that do not take the natural size of their
content into account, making them more efficient and suited for
high-level layout. Other layouts, like HBox, are more suited for
laying out content where the natural size is important.
"""
CSS = """
body {
margin: 0;
padding: 0;
overflow: hidden;
}
.flx-Layout {
/* sizing of widgets/layouts inside layout is defined per layout */
width: 100%;
height: 100%;
margin: 0px;
padding: 0px;
border-spacing: 0px;
border: 0px;
}
"""
| Python | 0.000059 | @@ -497,16 +497,18 @@
+/*
overflow
@@ -516,16 +516,18 @@
hidden;
+*/
%0A %7D%0A
|