| repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
tracyjacks/PyMetWeather | pymetweather/pymetweather.py | 1 | 13941 |
import curses
from datetime import date, timedelta
import locale
from textwrap import fill
from pymetweather.forecasts import WeatherForecast
from pymetweather.get_args import get_command_line_args, get_config_args
locale.setlocale(locale.LC_ALL, '')
class WeatherPrinter(object):
def __init__(self, forecast, screen_width):
self.fcs = forecast
self.cols = [
(['Time'], 5, '{$:02}:00'),
(['Conditions'], 22, '{W}'),
(['Precipitation', 'probability'], 15, '{Pp:>3} %'),
(['Temperature', '(Feels Like)'], 14, '{T:>2} {F} °C'),
(['Wind Speed', '(Gust)'], 16, '{S:>2} {G} mph'),
(['Wind', 'Direction'], 12, '{D:>3}'),
(['Relative', 'Humidity'], 10, '{H} %'),
(['Visibility'], 12, '{V}'),
(['UV', 'Index'], 7, '{U}')]
self.daily_cols = [
(['Day'], 13, '{$}', '{$}'),
(['Conditions'], 22, '{W}', '{W}'),
(['Precipitation', 'probability'], 15,
'{PPd:>3} %', '{PPn:>3} %'),
(['Max day/', 'Min night', 'Temperature', '(Feels like)'], 14,
'{Dm:>2} {FDm} °C', '{Nm:>2} {FNm} °C'),
(['Wind Speed', '(Gust)'], 16,
'{S:>2} {Gn} mph', '{S:>2} {Gm} mph'),
(['Wind', 'Direction'], 12, '{D:>3}', '{D:>3}'),
(['Relative', 'Humidity'], 10, '{Hn} %', '{Hm} %'),
(['Visibility'], 12, '{V}', '{V}')]
self.top_pad = curses.newpad(2000, 500)
self.tab_pad = curses.newpad(2000, 500)
self.bottom_bar = curses.newpad(1, 500)
self.help_screen_pad = curses.newpad(500, 500)
self.top_maxy = 0
self.tab_maxy = 0
self.tab_maxx = 0
self.screen_width = screen_width
self.print_bottom_bar()
self.setup_help()
@staticmethod
def addustr(win, text, *args):
win.addstr(text.encode('utf-8'), *args)
def print_help_screen(self, top_only):
if not top_only:
self.addustr(self.tab_pad, self.help_string)
self.tab_maxy = self.help_maxy
self.tab_maxx = self.help_maxx
def setup_help(self):
help = [
('q', 'Quit'),
('?', 'Show this help'),
('t', "Today's weather"),
('d', 'Five day summary'),
('0', "Today's weather"),
('1', "Tomorrow's weather"),
('2', 'Weather for 2 days later'),
('3', 'Weather for 3 days later'),
('4', 'Weather for 4 days later'),
('5–8', 'UK outlook for the next month'),
('l', 'UK outlook for the next month'),
('left arrow', 'scroll left'),
('right arrow', 'scroll right'),
('up arrow', 'scroll up'),
('down arrow', 'scroll down'),
]
c1width = max([len(k[0]) for k in help])
c2width = max([len(k[1]) for k in help])
self.help_string = ''
for h in help:
self.help_string += h[0].ljust(c1width + 1) + ' : ' + h[1] + '\n'
self.help_string = self.help_string.strip('\n')
self.help_maxy = len(help) - 1
self.help_maxx = c1width + c2width - 1
def print_bottom_bar(self):
self.addustr(
self.bottom_bar, '?: help q: quit t: today '
'd: 5 day summary 1–4: days 1 to 4 '
'l: longterm'.ljust(499),
curses.A_REVERSE | curses.A_BOLD)
def print_longer_term_weather(self):
regf1 = self.fcs.reg_fcs[2]['Paragraph']
regf2 = self.fcs.reg_fcs[3]['Paragraph']
self.addustr(
self.top_pad, self.wrap_text(regf1['title']), curses.A_BOLD)
self.addustr(self.top_pad, '\n' + self.wrap_text(regf1['$']) + '\n\n')
self.addustr(
self.top_pad, self.wrap_text(regf2['title']), curses.A_BOLD)
self.addustr(self.top_pad, '\n' + self.wrap_text(regf2['$']))
self.top_maxy = self.top_pad.getyx()[0] + 1
def wrap_text(self, text):
return fill(text, self.screen_width)
def print_hourly_top(self, n_day, day):
title = 'Weather for {}, {}'.format(
self.fcs.site_name, day.strftime('%A %d %B %Y'))
self.addustr(self.top_pad, self.wrap_text(title) + '\n', curses.A_BOLD)
regfindex = 0
regf = self.fcs.reg_fcs[0]['Paragraph']
if n_day == 0:
if 'Headline' in regf[regfindex]['title']:
self.addustr(self.top_pad, self.wrap_text(regf[regfindex]['$'])
+ '\n\n')
regfindex += 1
if 'Today' in regf[regfindex]['title']:
today_text = self.wrap_text('Today: ' + regf[regfindex]['$'])
self.addustr(self.top_pad, today_text[:7], curses.A_BOLD)
self.addustr(self.top_pad, today_text[7:] + '\n\n')
regfindex += 1
if 'Tonight' in regf[regfindex]['title']:
tonight_text = self.wrap_text(regf[regfindex]['title'] + ' ' +
regf[regfindex]['$'])
lent = len(regf[regfindex]['title'])
self.addustr(self.top_pad, tonight_text[:lent], curses.A_BOLD)
self.addustr(self.top_pad, tonight_text[lent:] + '\n\n')
regfindex += 1
elif n_day == 1:
for regfindex in range(len(regf)):
if day.strftime('%A') in regf[regfindex]['title']:
self.addustr(
self.top_pad,
self.wrap_text(regf[regfindex]['$']) + '\n\n')
break
else:
regf = self.fcs.reg_fcs[1]['Paragraph']
outlook = self.wrap_text(regf['title'] + ' ' + regf['$'])
lent = len(regf['title']) + 1
self.addustr(self.top_pad, '\n' + outlook[:lent], curses.A_BOLD)
self.addustr(self.top_pad, outlook[lent:] + '\n\n')
self.top_maxy = self.top_pad.getyx()[0] + 1
def print_hourly_tab(self, n_day, period):
width_counter = 0
for c in self.cols:
for i, head in enumerate(c[0]):
head_text = '{:^{}}'.format(head, c[1])
self.tab_pad.move(i, width_counter)
self.addustr(self.tab_pad, head_text, curses.A_BOLD)
width_counter += c[1]
top_row = (
self.tab_pad.getyx()[0] + max([len(c[0]) for c in self.cols]) - 1)
for i, rep in enumerate(period['Rep']):
width_counter = 0
for c in self.cols:
cell_text = '{:^{}}'.format(c[2].format(**rep), c[1])
self.tab_pad.move(top_row + i, width_counter)
self.addustr(self.tab_pad, cell_text)
width_counter += c[1]
self.tab_maxy = self.tab_pad.getyx()[0]
self.tab_maxx = sum([c[1] for c in self.cols]) - 2
def print_hourly_weather(self, n_day, top_only=False):
day = date.today() + timedelta(n_day)
period = self.fcs.hourly_fcs['Period'][n_day]
assert period['value'] == day.strftime('%Y-%m-%dZ')
self.print_hourly_top(n_day, day)
if not top_only:
self.print_hourly_tab(n_day, period)
def print_weather_brief(self, top_only=False):
period = self.fcs.daily_fcs['Period']
width_counter = 0
for c in self.daily_cols:
for i, head in enumerate(c[0]):
head_text = '{:^{}}'.format(head, c[1])
self.tab_pad.move(i, width_counter)
self.addustr(self.tab_pad, head_text, curses.A_BOLD)
width_counter += c[1]
top_row = (
self.tab_pad.getyx()[0] +
max([len(c[0]) for c in self.daily_cols]))
c = self.daily_cols[0]
for i, rep in enumerate(period):
cell_text = '{:<{}} '.format(rep['value'], c[1] - 3)
self.tab_pad.move(top_row + i * 4, 0)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:>{}} '.format(
c[2].format(**rep['Rep'][0]), c[1] - 3)
self.tab_pad.move(top_row + i * 4 + 1, 0)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:>{}} '.format(
c[3].format(**rep['Rep'][1]), c[1] - 3)
self.tab_pad.move(top_row + i * 4 + 2, 0)
self.addustr(self.tab_pad, cell_text)
for i, rep in enumerate(period):
rep = rep['Rep']
width_counter = self.daily_cols[0][1]
for c in self.daily_cols[1:]:
cell_text = '{:^{}}'.format(c[2].format(**rep[0]), c[1])
self.tab_pad.move(top_row + i * 4 + 1, width_counter)
self.addustr(self.tab_pad, cell_text)
cell_text = '{:^{}}'.format(c[3].format(**rep[1]), c[1])
self.tab_pad.move(top_row + i * 4 + 2, width_counter)
self.addustr(self.tab_pad, cell_text)
width_counter += c[1]
self.tab_maxy = self.tab_pad.getyx()[0]
self.tab_maxx = sum([c[1] for c in self.daily_cols]) - 2
def print_screen(self, screen, screen_width=None, top_only=False):
if screen_width is not None:
self.screen_width = screen_width
self.top_pad.clear()
self.top_maxy = 0
if not top_only:
self.tab_maxy = 0
self.tab_maxx = 0
self.tab_pad.clear()
if screen in range(0, 5):
self.print_hourly_weather(screen, top_only)
elif screen == 8:
self.print_longer_term_weather()
elif screen == 7:
self.print_weather_brief(top_only)
elif screen == 9:
self.print_help_screen(top_only)
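# Screen numbers dispatched by print_screen above (as used by WeatherApp.key_map
# below): 0-4 are the hourly forecasts for today and the next four days, 7 is
# the five day summary, 8 is the long term UK outlook and 9 is the help screen.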
class WeatherApp(object):
key_map = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
'5': 8, '6': 8, '7': 8, '8': 8, '9': 9,
't': 0,
'l': 8,
'd': 7,
'b': 7,
'?': 9}
def __init__(self, stdscr, fcs, start_screen=0):
self.stdscr = stdscr
curses.curs_set(0)
curses.use_default_colors()
self.fcs = fcs
self.scrolly = 0
self.scrollx = 0
self.maxy = 0
self.maxx = 0
self.y = self.stdscr.getmaxyx()[0] - 1
self.x = self.stdscr.getmaxyx()[1] - 1
self.printer = WeatherPrinter(self.fcs, self.x + 1)
self.print_screen(start_screen)
def print_resize(self):
self.y = self.stdscr.getmaxyx()[0] - 1
self.x = self.stdscr.getmaxyx()[1] - 1
self.printer.print_screen(self.screen_showing, self.x + 1, True)
self.maxx = max(self.printer.tab_maxx, self.x - 1)
self.maxy = self.printer.tab_maxy + self.printer.top_maxy
if self.y > (self.maxy - self.scrolly):
self.scrolly = max(self.maxy - (self.y - 1), 0)
if self.x > (self.maxx - self.scrollx):
self.scrollx = max(self.maxx - (self.x - 1), 0)
self.draw_screen()
def print_screen(self, screen):
self.screen_showing = screen
self.scrolly = 0
self.scrollx = 0
self.printer.print_screen(self.screen_showing)
self.maxy = self.printer.tab_maxy + self.printer.top_maxy
self.maxx = max(self.printer.tab_maxx, self.x - 1)
self.draw_screen()
def draw_screen(self):
self.stdscr.clear()
self.stdscr.refresh()
top_y = self.printer.top_maxy
try:
assert self.y == self.stdscr.getmaxyx()[0] - 1
assert self.x == self.stdscr.getmaxyx()[1] - 1
except AssertionError:
self.print_resize()
return
self.printer.top_pad.noutrefresh(
self.scrolly, 0, 0, 0, min(top_y, self.y), self.x)
if self.y - (top_y - self.scrolly) > 1:
self.printer.tab_pad.noutrefresh(
max(0, self.scrolly - top_y), self.scrollx,
top_y - self.scrolly, 0,
self.y, self.x)
self.printer.bottom_bar.noutrefresh(
0, 0, self.y, 0, self.y, self.x)
try:
assert self.y == self.stdscr.getmaxyx()[0] - 1
assert self.x == self.stdscr.getmaxyx()[1] - 1
except AssertionError:
self.print_resize()
return
with open('/tmp/log', 'a') as f:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
self.maxy, self.y, self.scrolly,
self.maxx, self.x, self.scrollx))
curses.doupdate()
def main_loop(self):
while True:
c = self.stdscr.getkey()
if c == 'q':
return
elif c in self.key_map and self.screen_showing != self.key_map[c]:
self.print_screen(self.key_map[c])
elif c == 'KEY_RESIZE':
self.print_resize()
elif c == 'KEY_DOWN':
if self.scrolly + self.y - 1 < self.maxy:
self.scrolly += 1
self.draw_screen()
elif c == 'KEY_UP' and self.scrolly != 0:
self.scrolly -= 1
self.draw_screen()
elif c == 'KEY_LEFT' and self.scrollx != 0:
self.scrollx -= 1
self.draw_screen()
elif c == 'KEY_RIGHT':
if self.scrollx + self.x - 1 < self.maxx:
self.scrollx += 1
self.draw_screen()
def run_curses_app(screen, fcs):
wap = WeatherApp(screen, fcs)
wap.main_loop()
def run_app(args):
fcs = WeatherForecast(args['api_key'], args['location'], args['datadir'])
if args['quiet_update']:
fcs.load(True)
return
fcs.load(args['dont_update'])
curses.wrapper(run_curses_app, fcs)
def main():
args = get_config_args()
args.update(get_command_line_args())
run_app(args)
| gpl-2.0 | -7,420,689,951,984,828,000 | 35.413613 | 79 | 0.492955 | false |
jcarva/digital_image_processing_assignments | spatial_domain/python/task1_6.py | 1 | 1722 |
# coding=UTF-8
# 1.6. Thresholding applied to the Y band, with threshold m and two options: a) m
# chosen by the user; b) m = mean of the Y band values;
import numpy as np
import utils
import color
def main():
image = utils.load_image('lenna.png')
yiq_image = color.rgb2yiq(image)
grayscale_image = yiq_image[:, :, 2] # Y
threshold_value = 255 * 0.2
mean_value = np.mean(grayscale_image)
threshold_user_image = _segment(grayscale_image, threshold_value)
original_threshold_user_image = np.copy(yiq_image)
original_threshold_user_image[:, :, 2] = threshold_user_image
original_threshold_user_image = color.yiq2rgb(original_threshold_user_image)
threshold_mean_image = _segment(grayscale_image, mean_value)
original_threshold_mean_image = np.copy(yiq_image)
original_threshold_mean_image[:, :, 2] = threshold_mean_image
original_threshold_mean_image = color.yiq2rgb(original_threshold_mean_image)
utils.display_single_image('Original Image', image)
utils.display_single_image('YIQ Image', yiq_image)
utils.display_single_image('Y Channel', grayscale_image)
utils.display_single_image('Y Threshold (User ' + str(threshold_value) + ')', threshold_user_image)
utils.display_single_image('Back to Original (User ' + str(threshold_value) + ')', original_threshold_user_image)
utils.display_single_image('Y Threshold (Mean ' + str(mean_value) + ')', threshold_mean_image)
utils.display_single_image('Back to Original (Mean ' + str(mean_value) + ')', original_threshold_mean_image)
utils.wait_key_and_destroy_windows()
def _segment(image, m):
output = (image >= m) * 255
return output
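# Illustrative check of _segment (toy values, not part of the assignment):
# pixels >= m are mapped to 255 and everything else to 0.
#
#   >>> import numpy as np
#   >>> _segment(np.array([[10, 200], [128, 40]]), 128)
#   array([[  0, 255],
#          [255,   0]])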
if __name__ == "__main__":
main()
| gpl-3.0 | -4,424,867,651,343,764,500 | 34.770833 | 117 | 0.689977 | false |
PXke/invenio | invenio/legacy/websubmit/functions/Create_Modify_Interface.py | 1 | 12922 |
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
from invenio.legacy.dbquery import run_sql
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field
from invenio.ext.logging import register_exception
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
Delete the text file after having read-in its value.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
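# Hedged example (directory and field name are hypothetical): if the working
# directory holds a text file named after the field, its stripped contents are
# returned and the file is deleted so the stale value cannot be re-read.
#
#   >>> Create_Modify_Interface_getfieldval_fromfile('/tmp/submitdir', 'DEMOTITLE')
#   'My updated title'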
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
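# Hedged example (MARC codes, record id and values are hypothetical): a
# comma-separated field code is resolved one code at a time with Get_Field()
# and the values are joined with newlines.
#
#   >>> Create_Modify_Interface_getfieldval_fromDBrec('245__a,245__b', 12)
#   'Some title\nSome subtitle'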
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
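# Both supported input formats normalise to DD/MM/YYYY; anything else is
# returned unchanged:
#
#   >>> Create_Modify_Interface_transform_date('23 Apr 2005')
#   '23/04/2005'
#   >>> Create_Modify_Interface_transform_date('2005-04-23')
#   '23/04/2005'
#   >>> Create_Modify_Interface_transform_date('not a date')
#   'not a date'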
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
to redefine a submission page for the modifications, but rely on
the elements already defined for the initial submission i.e. SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
workflow, after the functions that retrieves report number and
record id (Get_Report_Number, Get_Recid). Functions at step 2 are
the one executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
These field names are separated by "\n" or "+".
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
The file referenced by 'fieldnameMBI' is usually generated from a
multiple select form field: users can then select one or several
fields to modify
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
the Response element itself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
retrieve value either from the record, or from the submission
directory.
"""
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
# Path of file containing fields to modify
the_globals = {
'doctype' : doctype,
'action' : action,
'act' : action, ## for backward compatibility
'step' : step,
'access' : access,
'ln' : ln,
'curdir' : curdir,
'uid' : user_info['uid'],
'uid_email' : user_info['email'],
'rn' : rn,
'last_step' : last_step,
'action_score' : action_score,
'__websubmit_in_jail__' : True,
'form': form,
'sysno': sysno,
'user_info' : user_info,
'__builtins__' : globals()['__builtins__'],
'Request_Print': Request_Print
}
if os.path.exists("%s/%s" % (curdir, fieldname)):
fp = open( "%s/%s" % (curdir, fieldname), "r" )
fieldstext = fp.read()
fp.close()
fieldstext = re.sub("\+","\n", fieldstext)
fields = fieldstext.split("\n")
else:
res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
if len(res) == 1:
fields = res[0][0].replace(" ", "")
fields = re.findall("<optionvalue=.*>", fields)
regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
fields = [regexp.search(x) for x in fields]
fields = [x.group("value") for x in fields if x is not None]
fields = [x for x in fields if x not in ("Select", "select")]
else:
raise InvenioWebSubmitFunctionError("cannot find fields to modify")
#output some text
t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn
for field in fields:
subfield = ""
value = ""
marccode = ""
text = ""
# retrieve and display the modification text
t = t + "<FONT color=\"darkblue\">\n"
res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res)>0:
t = t + "<small>%s</small> </FONT>\n" % res[0][0]
# retrieve the marc code associated with the field
res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
marccode = res[0][0]
# then retrieve the previous value of the field
if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
# Page has been reloaded - get field value from text file on server, not from DB record
value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
else:
# First call to page - get field value from DB record
value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
# If field is a date value, transform date into format DD/MM/YYYY:
value = Create_Modify_Interface_transform_date(value)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
element_type = res[0][3]
numcols = res[0][6]
numrows = res[0][5]
size = res[0][4]
maxlength = res[0][7]
val = res[0][8]
fidesc = res[0][9]
if element_type == "T":
text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value)
elif element_type == "F":
text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
elif element_type == "I":
value = re.sub("[\n\r\t]+", "", value)
text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "H":
text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "S":
values = re.split("[\n\r]+", value)
text = fidesc
if re.search("%s\[\]" % field, fidesc):
multipletext = "[]"
else:
multipletext = ""
if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
text += "<SCRIPT>\n"
text += "var i = 0;\n"
text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
text += "max = el.length;\n"
for val in values:
text += "var found = 0;\n"
text += "var i=0;\n"
text += "while (i != max) {\n"
text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val)
text += " el.options[i].selected = true;\n"
text += " found = 1;\n"
text += " }\n"
text += " i=i+1;\n"
text += "}\n"
#text += "if (found == 0) {\n"
#text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
#text += "}\n"
text += "</SCRIPT>\n"
elif element_type == "D":
text = fidesc
elif element_type == "R":
try:
co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
## Note this exec is safe WRT global variable because the
## Create_Modify_Interface has already been parsed by
## execfile within a protected environment.
the_globals['text'] = ''
exec co in the_globals
text = the_globals['text']
except:
msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
register_exception(req=None, alert_admin=True, prefix=msg)
raise InvenioWebSubmitFunctionError(msg)
else:
text = "%s: unknown field type" % field
t = t + "<small>%s</small>" % text
# output our flag field
t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
# output some more text
t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>"
return t
| gpl-2.0 | 429,905,731,939,165,250 | 46.682657 | 252 | 0.580019 | false |
quonb/atom-generator | atom_generator/video.py | 1 | 2028 |
import re
class YouTube(object):
def __init__(self, url=None):
self._video_id = self._extract_id(url)
def __call__(self, url=False):
if url is None or url:
self._video_id = self._extract_id(url)
return self._video_id
def _extract_id(self, url=None):
"""Extract youtube video ID
Based on `youtube_dl` code
"""
if not url:
return None
YOUTUBE_URL = r"""^
(?:
(?:https?://)? # http(s):// (optional)
(?:(?:(?:
(?:\w+\.)?youtube(?:-nocookie)?\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/)| # v/ or embed/ or e/
(?: # or the v= param in all its forms
(?:
(?:watch|movie)(?:_popup)?(?:\.php)?
)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))|
youtu\.be/ # just youtu.be/xxxx
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$"""
video_id = re.match(YOUTUBE_URL, str(url), re.VERBOSE)
return video_id and video_id.group(1)
def thumbnail(self):
return self._video_id and "http://i.ytimg.com/vi/%s/0.jpg" % self._video_id
def video(self):
return self._video_id and "http://www.youtube.com/watch?v=%s" % self._video_id
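# Minimal usage sketch (the video id below is purely illustrative):
#
#   >>> yt = YouTube('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
#   >>> yt()
#   'dQw4w9WgXcQ'
#   >>> yt.thumbnail()
#   'http://i.ytimg.com/vi/dQw4w9WgXcQ/0.jpg'
#   >>> yt.video()
#   'http://www.youtube.com/watch?v=dQw4w9WgXcQ'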
| apache-2.0 | -9,203,827,087,365,975,000 | 37.264151 | 97 | 0.446746 | false |
nikolhm/Pokus | knownpaths.py | 1 | 9583 |
import ctypes, sys
from ctypes import windll, wintypes
from uuid import UUID
class GUID(ctypes.Structure): # [1]
_fields_ = [
("Data1", wintypes.DWORD),
("Data2", wintypes.WORD),
("Data3", wintypes.WORD),
("Data4", wintypes.BYTE * 8)
]
def __init__(self, uuid_):
ctypes.Structure.__init__(self)
self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid_.fields
for i in range(2, 8):
self.Data4[i] = rest>>(8 - i - 1)*8 & 0xff
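# Sketch of the packing above: Data1-Data3 take the UUID's leading fields and
# Data4 takes the trailing eight bytes (clock sequence plus the 48-bit node).
# For example, using one of the known folder ids defined below:
#
#   >>> g = GUID(UUID('{374DE290-123F-4565-9164-39C4925E467B}'))
#   >>> hex(g.Data1)
#   '0x374de290'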
class FOLDERID: # [2]
AccountPictures = UUID('{008ca0b1-55b4-4c56-b8a8-4de4b299d3be}')
AdminTools = UUID('{724EF170-A42D-4FEF-9F26-B60E846FBA4F}')
ApplicationShortcuts = UUID('{A3918781-E5F2-4890-B3D9-A7E54332328C}')
CameraRoll = UUID('{AB5FB87B-7CE2-4F83-915D-550846C9537B}')
CDBurning = UUID('{9E52AB10-F80D-49DF-ACB8-4330F5687855}')
CommonAdminTools = UUID('{D0384E7D-BAC3-4797-8F14-CBA229B392B5}')
CommonOEMLinks = UUID('{C1BAE2D0-10DF-4334-BEDD-7AA20B227A9D}')
CommonPrograms = UUID('{0139D44E-6AFE-49F2-8690-3DAFCAE6FFB8}')
CommonStartMenu = UUID('{A4115719-D62E-491D-AA7C-E74B8BE3B067}')
CommonStartup = UUID('{82A5EA35-D9CD-47C5-9629-E15D2F714E6E}')
CommonTemplates = UUID('{B94237E7-57AC-4347-9151-B08C6C32D1F7}')
Contacts = UUID('{56784854-C6CB-462b-8169-88E350ACB882}')
Cookies = UUID('{2B0F765D-C0E9-4171-908E-08A611B84FF6}')
Desktop = UUID('{B4BFCC3A-DB2C-424C-B029-7FE99A87C641}')
DeviceMetadataStore = UUID('{5CE4A5E9-E4EB-479D-B89F-130C02886155}')
Documents = UUID('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}')
DocumentsLibrary = UUID('{7B0DB17D-9CD2-4A93-9733-46CC89022E7C}')
Downloads = UUID('{374DE290-123F-4565-9164-39C4925E467B}')
Favorites = UUID('{1777F761-68AD-4D8A-87BD-30B759FA33DD}')
Fonts = UUID('{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}')
GameTasks = UUID('{054FAE61-4DD8-4787-80B6-090220C4B700}')
History = UUID('{D9DC8A3B-B784-432E-A781-5A1130A75963}')
ImplicitAppShortcuts = UUID('{BCB5256F-79F6-4CEE-B725-DC34E402FD46}')
InternetCache = UUID('{352481E8-33BE-4251-BA85-6007CAEDCF9D}')
Libraries = UUID('{1B3EA5DC-B587-4786-B4EF-BD1DC332AEAE}')
Links = UUID('{bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968}')
LocalAppData = UUID('{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}')
LocalAppDataLow = UUID('{A520A1A4-1780-4FF6-BD18-167343C5AF16}')
LocalizedResourcesDir = UUID('{2A00375E-224C-49DE-B8D1-440DF7EF3DDC}')
Music = UUID('{4BD8D571-6D19-48D3-BE97-422220080E43}')
MusicLibrary = UUID('{2112AB0A-C86A-4FFE-A368-0DE96E47012E}')
NetHood = UUID('{C5ABBF53-E17F-4121-8900-86626FC2C973}')
OriginalImages = UUID('{2C36C0AA-5812-4b87-BFD0-4CD0DFB19B39}')
PhotoAlbums = UUID('{69D2CF90-FC33-4FB7-9A0C-EBB0F0FCB43C}')
PicturesLibrary = UUID('{A990AE9F-A03B-4E80-94BC-9912D7504104}')
Pictures = UUID('{33E28130-4E1E-4676-835A-98395C3BC3BB}')
Playlists = UUID('{DE92C1C7-837F-4F69-A3BB-86E631204A23}')
PrintHood = UUID('{9274BD8D-CFD1-41C3-B35E-B13F55A758F4}')
Profile = UUID('{5E6C858F-0E22-4760-9AFE-EA3317B67173}')
ProgramData = UUID('{62AB5D82-FDC1-4DC3-A9DD-070D1D495D97}')
ProgramFiles = UUID('{905e63b6-c1bf-494e-b29c-65b732d3d21a}')
ProgramFilesX64 = UUID('{6D809377-6AF0-444b-8957-A3773F02200E}')
ProgramFilesX86 = UUID('{7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E}')
ProgramFilesCommon = UUID('{F7F1ED05-9F6D-47A2-AAAE-29D317C6F066}')
ProgramFilesCommonX64 = UUID('{6365D5A7-0F0D-45E5-87F6-0DA56B6A4F7D}')
ProgramFilesCommonX86 = UUID('{DE974D24-D9C6-4D3E-BF91-F4455120B917}')
Programs = UUID('{A77F5D77-2E2B-44C3-A6A2-ABA601054A51}')
Public = UUID('{DFDF76A2-C82A-4D63-906A-5644AC457385}')
PublicDesktop = UUID('{C4AA340D-F20F-4863-AFEF-F87EF2E6BA25}')
PublicDocuments = UUID('{ED4824AF-DCE4-45A8-81E2-FC7965083634}')
PublicDownloads = UUID('{3D644C9B-1FB8-4f30-9B45-F670235F79C0}')
PublicGameTasks = UUID('{DEBF2536-E1A8-4c59-B6A2-414586476AEA}')
PublicLibraries = UUID('{48DAF80B-E6CF-4F4E-B800-0E69D84EE384}')
PublicMusic = UUID('{3214FAB5-9757-4298-BB61-92A9DEAA44FF}')
PublicPictures = UUID('{B6EBFB86-6907-413C-9AF7-4FC2ABF07CC5}')
PublicRingtones = UUID('{E555AB60-153B-4D17-9F04-A5FE99FC15EC}')
PublicUserTiles = UUID('{0482af6c-08f1-4c34-8c90-e17ec98b1e17}')
PublicVideos = UUID('{2400183A-6185-49FB-A2D8-4A392A602BA3}')
QuickLaunch = UUID('{52a4f021-7b75-48a9-9f6b-4b87a210bc8f}')
Recent = UUID('{AE50C081-EBD2-438A-8655-8A092E34987A}')
RecordedTVLibrary = UUID('{1A6FDBA2-F42D-4358-A798-B74D745926C5}')
ResourceDir = UUID('{8AD10C31-2ADB-4296-A8F7-E4701232C972}')
Ringtones = UUID('{C870044B-F49E-4126-A9C3-B52A1FF411E8}')
RoamingAppData = UUID('{3EB685DB-65F9-4CF6-A03A-E3EF65729F3D}')
RoamedTileImages = UUID('{AAA8D5A5-F1D6-4259-BAA8-78E7EF60835E}')
RoamingTiles = UUID('{00BCFC5A-ED94-4e48-96A1-3F6217F21990}')
SampleMusic = UUID('{B250C668-F57D-4EE1-A63C-290EE7D1AA1F}')
SamplePictures = UUID('{C4900540-2379-4C75-844B-64E6FAF8716B}')
SamplePlaylists = UUID('{15CA69B3-30EE-49C1-ACE1-6B5EC372AFB5}')
SampleVideos = UUID('{859EAD94-2E85-48AD-A71A-0969CB56A6CD}')
SavedGames = UUID('{4C5C32FF-BB9D-43b0-B5B4-2D72E54EAAA4}')
SavedSearches = UUID('{7d1d3a04-debb-4115-95cf-2f29da2920da}')
Screenshots = UUID('{b7bede81-df94-4682-a7d8-57a52620b86f}')
SearchHistory = UUID('{0D4C3DB6-03A3-462F-A0E6-08924C41B5D4}')
SearchTemplates = UUID('{7E636BFE-DFA9-4D5E-B456-D7B39851D8A9}')
SendTo = UUID('{8983036C-27C0-404B-8F08-102D10DCFD74}')
SidebarDefaultParts = UUID('{7B396E54-9EC5-4300-BE0A-2482EBAE1A26}')
SidebarParts = UUID('{A75D362E-50FC-4fb7-AC2C-A8BEAA314493}')
SkyDrive = UUID('{A52BBA46-E9E1-435f-B3D9-28DAA648C0F6}')
SkyDriveCameraRoll = UUID('{767E6811-49CB-4273-87C2-20F355E1085B}')
SkyDriveDocuments = UUID('{24D89E24-2F19-4534-9DDE-6A6671FBB8FE}')
SkyDrivePictures = UUID('{339719B5-8C47-4894-94C2-D8F77ADD44A6}')
StartMenu = UUID('{625B53C3-AB48-4EC1-BA1F-A1EF4146FC19}')
Startup = UUID('{B97D20BB-F46A-4C97-BA10-5E3608430854}')
System = UUID('{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}')
SystemX86 = UUID('{D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27}')
Templates = UUID('{A63293E8-664E-48DB-A079-DF759E0509F7}')
UserPinned = UUID('{9E3995AB-1F9C-4F13-B827-48B24B6C7174}')
UserProfiles = UUID('{0762D272-C50A-4BB0-A382-697DCD729B80}')
UserProgramFiles = UUID('{5CD7AEE2-2219-4A67-B85D-6C9CE15660CB}')
UserProgramFilesCommon = UUID('{BCBD3057-CA5C-4622-B42D-BC56DB0AE516}')
Videos = UUID('{18989B1D-99B5-455B-841C-AB7C74E4DDFC}')
VideosLibrary = UUID('{491E922F-5643-4AF4-A7EB-4E7A138D8174}')
Windows = UUID('{F38BF404-1D43-42F2-9305-67DE0B28FC23}')
class UserHandle: # [3]
current = wintypes.HANDLE(0)
common = wintypes.HANDLE(-1)
_CoTaskMemFree = windll.ole32.CoTaskMemFree # [4]
_CoTaskMemFree.restype= None
_CoTaskMemFree.argtypes = [ctypes.c_void_p]
_SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath # [5] [3]
_SHGetKnownFolderPath.argtypes = [
ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p)
]
class PathNotFoundException(Exception): pass
def get_path(folderid, user_handle=UserHandle.common):
fid = GUID(folderid)
pPath = ctypes.c_wchar_p()
S_OK = 0
if _SHGetKnownFolderPath(ctypes.byref(fid), 0, user_handle, ctypes.byref(pPath)) != S_OK:
raise PathNotFoundException()
path = pPath.value
_CoTaskMemFree(pPath)
return path
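# Hedged usage sketch (Windows only; the exact path depends on the machine and
# the signed-in user):
#
#   >>> get_path(FOLDERID.Downloads, UserHandle.current)
#   'C:\\Users\\someuser\\Downloads'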
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] in ['-?', '/?']:
print('python knownpaths.py FOLDERID {current|common}')
sys.exit(0)
try:
folderid = getattr(FOLDERID, sys.argv[1])
except AttributeError:
print('Unknown folder id "%s"' % sys.argv[1], file=sys.stderr)
sys.exit(1)
try:
if len(sys.argv) == 2:
print(get_path(folderid))
else:
print(get_path(folderid, getattr(UserHandle, sys.argv[2])))
except PathNotFoundException:
print('Folder not found "%s"' % ' '.join(sys.argv[1:]), file=sys.stderr)
sys.exit(1)
# [1] http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx
# [2] http://msdn.microsoft.com/en-us/library/windows/desktop/dd378457.aspx
# [3] http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188.aspx
# [4] http://msdn.microsoft.com/en-us/library/windows/desktop/ms680722.aspx
# [5] http://www.themacaque.com/?p=954
| mit | -3,393,526,364,773,057,500 | 57.432927 | 93 | 0.627883 | false |
tanghaibao/jcvi | jcvi/projects/vanilla.py | 1 | 11915 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Plotting scripts for the vanilla genome paper.
"""
import logging
import sys
from jcvi.apps.base import ActionDispatcher, OptionParser
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.formats.base import get_number
from jcvi.formats.bed import Bed
from jcvi.graphics.base import normalize_axes, panel_labels, plt, savefig
from jcvi.graphics.glyph import TextCircle
from jcvi.graphics.synteny import Synteny, draw_gene_legend
def main():
actions = (
# Chromosome painting since WGD
("ancestral", "paint 14 chromosomes following alpha WGD (requires data)"),
# main figures in text
("ploidy", "plot vanilla synteny (requires data)"),
# Composite phylogeny - tree and ks
("phylogeny", "create a composite figure with tree and ks"),
("tree", "create a separate figure with tree"),
("ks", "create a separate figure with ks"),
# Composite synteny - wgd and microsynteny
("synteny", "create a composite figure with wgd and microsynteny"),
("wgd", "create separate figures with wgd"),
("microsynteny", "create separate figures with microsynteny"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def phylogeny(args):
"""
%prog phylogeny treefile ks.layout
Create a composite figure with (A) tree and (B) ks.
"""
from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree
p = OptionParser(phylogeny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x12")
(datafile, layoutfile) = args
logging.debug("Load tree file `{0}`".format(datafile))
t, hpd = parse_tree(datafile)
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax1 = fig.add_axes([0, 0.4, 1, 0.6])
ax2 = fig.add_axes([0.12, 0.065, 0.8, 0.3])
margin, rmargin = 0.1, 0.2 # Left and right margin
leafinfo = LeafInfoFile("leafinfo.csv").cache
wgdinfo = WGDInfoFile("wgdinfo.csv").cache
outgroup = "ginkgo"
# Panel A
draw_tree(
ax1,
t,
hpd=hpd,
margin=margin,
rmargin=rmargin,
supportcolor=None,
internal=False,
outgroup=outgroup,
reroot=False,
leafinfo=leafinfo,
wgdinfo=wgdinfo,
geoscale=True,
)
from jcvi.apps.ks import Layout, KsPlot, KsFile
# Panel B
ks_min = 0.0
ks_max = 3.0
bins = 60
fill = False
layout = Layout(layoutfile)
print(layout, file=sys.stderr)
kp = KsPlot(ax2, ks_max, bins, legendp="upper right")
for lo in layout:
data = KsFile(lo.ksfile)
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]
kp.add_data(
data,
lo.components,
label=lo.label,
color=lo.color,
marker=lo.marker,
fill=fill,
fitted=False,
kde=True,
)
kp.draw(filename=None)
normalize_axes([root, ax1])
labels = ((0.05, 0.95, "A"), (0.05, 0.4, "B"))
panel_labels(root, labels)
image_name = "phylogeny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def tree(args):
"""
%prog tree treefile
Create a tree figure.
"""
from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree
p = OptionParser(tree.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x8")
(datafile,) = args
logging.debug("Load tree file `{0}`".format(datafile))
t, hpd = parse_tree(datafile)
fig = plt.figure(1, (iopts.w, iopts.h))
ax1 = fig.add_axes([0, 0, 1, 1])
margin, rmargin = 0.1, 0.2 # Left and right margin
leafinfo = LeafInfoFile("leafinfo.csv").cache
wgdinfo = WGDInfoFile("wgdinfo.csv").cache
outgroup = "ginkgo"
# Panel A
draw_tree(
ax1,
t,
hpd=hpd,
margin=margin,
rmargin=rmargin,
supportcolor=None,
internal=False,
outgroup=outgroup,
reroot=False,
leafinfo=leafinfo,
wgdinfo=wgdinfo,
geoscale=True,
)
normalize_axes([ax1])
image_name = "tree.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def ks(args):
"""
%prog ks ks.layout
Create a ks figure.
"""
p = OptionParser(ks.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x4")
(layoutfile,) = args
from jcvi.apps.ks import Layout, KsPlot, KsFile
fig = plt.figure(1, (iopts.w, iopts.h))
ax2 = fig.add_axes([0.12, 0.12, 0.8, 0.8])
# Panel B
ks_min = 0.0
ks_max = 3.0
bins = 60
fill = False
layout = Layout(layoutfile)
print(layout, file=sys.stderr)
kp = KsPlot(ax2, ks_max, bins, legendp="upper right")
for lo in layout:
data = KsFile(lo.ksfile)
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]
kp.add_data(
data,
lo.components,
label=lo.label,
color=lo.color,
marker=lo.marker,
fill=fill,
fitted=False,
kde=True,
)
kp.draw(filename=None)
image_name = "ks.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def synteny(args):
"""
%prog synteny vplanifoliaA_blocks.bed vplanifoliaA.sizes \
b1.blocks all.bed b1.layout
Create a composite figure with (A) wgd and (B) microsynteny.
"""
from jcvi.graphics.chromosome import draw_chromosomes
p = OptionParser(synteny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x12")
(bedfile, sizesfile, blocksfile, allbedfile, blockslayout) = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax1 = fig.add_axes([0, 0.5, 1, 0.5])
ax2 = fig.add_axes([0.02, 0, 0.98, 0.5])
# Panel A
title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$"
draw_chromosomes(
ax1,
bedfile,
sizes=sizesfile,
iopts=iopts,
mergedist=200000,
winsize=50000,
imagemap=False,
gauge=True,
legend=False,
title=title,
)
# Panel B
draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout)
normalize_axes([root, ax1, ax2])
labels = ((0.05, 0.95, "A"), (0.05, 0.5, "B"))
panel_labels(root, labels)
image_name = "synteny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def wgd(args):
"""
%prog wgd vplanifoliaA_blocks.bed vplanifoliaA.sizes
Create a wgd figure.
"""
from jcvi.graphics.chromosome import draw_chromosomes
p = OptionParser(wgd.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
(bedfile, sizesfile) = args
fig = plt.figure(1, (iopts.w, iopts.h))
ax1 = fig.add_axes([0, 0, 1, 1])
title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$"
draw_chromosomes(
ax1,
bedfile,
sizes=sizesfile,
iopts=iopts,
mergedist=200000,
winsize=50000,
imagemap=False,
gauge=True,
legend=False,
title=title,
)
normalize_axes([ax1])
image_name = "wgd.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def microsynteny(args):
"""
%prog microsynteny b1.blocks all.bed b1.layout
Create a microsynteny figure.
"""
p = OptionParser(microsynteny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x6")
(blocksfile, allbedfile, blockslayout) = args
fig = plt.figure(1, (iopts.w, iopts.h))
ax2 = fig.add_axes([0, 0, 1, 1])
draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout)
normalize_axes([ax2])
image_name = "microsynteny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def ancestral(args):
"""
%prog ancestral vplanifoliaA.vplanifoliaA.anchors > vplanifoliaA_blocks.bed
Paint 14 chromosomes following alpha WGD.
"""
p = OptionParser(ancestral.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(anchorsfile,) = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
# We focus on the following chromosome pairs
target_pairs = {
(1, 1),
(1, 6),
(1, 8),
(1, 13),
(2, 4),
(3, 12),
(3, 14),
(5, 6),
(5, 8),
(7, 9),
(7, 11),
(9, 10),
(10, 11),
}
def get_target(achr, bchr):
if "chr" not in achr and "chr" not in bchr:
return None
achr, bchr = get_number(achr), get_number(bchr)
if achr > bchr:
achr, bchr = bchr, achr
if (achr, bchr) in target_pairs:
return achr, bchr
return None
def build_bedline(astart, aend, target_pair):
# target_name = "{:02d}-{:02d}".format(*target_pair)
target_name = [str(x) for x in target_pair if x in (1, 2, 3, 5, 7, 10)][0]
return "\t".join(
str(x) for x in (astart.seqid, astart.start, aend.end, target_name)
)
# Iterate through the blocks, store any regions that has hits to one of the
# target_pairs
ac = AnchorFile(anchorsfile)
blocks = ac.blocks
outbed = Bed()
for i, block in enumerate(blocks):
a, b, scores = zip(*block)
a = [qorder[x] for x in a]
b = [sorder[x] for x in b]
astart, aend = min(a)[1], max(a)[1]
bstart, bend = min(b)[1], max(b)[1]
# Now convert to BED lines with new accn
achr, bchr = astart.seqid, bstart.seqid
target = get_target(achr, bchr)
if target is None:
continue
outbed.add(build_bedline(astart, aend, target))
outbed.add(build_bedline(bstart, bend, target))
outbed.print_to_file(sorted=True)
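# For illustration (coordinates are invented): each block kept by ancestral()
# yields two BED lines that share one painted-chromosome label, e.g. the
# (1, 6) chromosome pair is written as:
#
#   chr1    120000  450000  1
#   chr6    80000   380000  1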
def ploidy(args):
"""
%prog ploidy b1.blocks all.bed b1.layout
Build a figure that illustrates the WGD history of the vanilla genome.
"""
p = OptionParser(ploidy.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x6")
if len(args) != 3:
sys.exit(not p.print_help())
blocksfile, bedfile, blockslayout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
draw_ploidy(fig, root, blocksfile, bedfile, blockslayout)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "vanilla-karyotype"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def draw_ploidy(fig, root, blocksfile, bedfile, blockslayout):
switchidsfile = "switch.ids"
Synteny(
fig,
root,
blocksfile,
bedfile,
blockslayout,
scalebar=True,
switch=switchidsfile,
)
# Legend showing the orientation of the genes
draw_gene_legend(root, 0.2, 0.3, 0.53)
# WGD labels
radius = 0.025
tau_color = "#bebada"
alpha_color = "#bc80bd"
label_color = "k"
pad = 0.05
for y in (0.74 + 1.5 * pad, 0.26 - 1.5 * pad):
TextCircle(
root,
0.25,
y,
r"$\alpha^{O}$",
radius=radius,
fc=alpha_color,
color=label_color,
fontweight="bold",
)
TextCircle(
root,
0.75,
y,
r"$\alpha^{O}$",
radius=radius,
fc=alpha_color,
color=label_color,
fontweight="bold",
)
for y in (0.74 + 3 * pad, 0.26 - 3 * pad):
TextCircle(
root, 0.5, y, r"$\tau$", radius=radius, fc=tau_color, color=label_color
)
if __name__ == "__main__":
main()
| bsd-2-clause | 6,120,270,252,504,517,000 | 25.07221 | 83 | 0.573059 | false |
cwacek/python-jsonschema-objects | python_jsonschema_objects/wrapper_types.py | 1 | 11522 |
import collections
import logging
import six
from python_jsonschema_objects import util
from python_jsonschema_objects.validators import registry, ValidationError
from python_jsonschema_objects.util import lazy_format as fmt
logger = logging.getLogger(__name__)
class ArrayWrapper(collections.abc.MutableSequence):
"""A wrapper for array-like structures.
This implements all of the array like behavior that one would want,
with a dirty-tracking mechanism to avoid constant validation costs.
"""
@property
def strict(self):
return getattr(self, "_strict_", False)
def __len__(self):
return len(self.data)
def mark_or_revalidate(self):
if self.strict:
self.validate()
else:
self._dirty = True
def __delitem__(self, index):
self.data.pop(index)
self.mark_or_revalidate()
def insert(self, index, value):
self.data.insert(index, value)
self.mark_or_revalidate()
def __setitem__(self, index, value):
self.data[index] = value
self.mark_or_revalidate()
def __getitem__(self, idx):
return self.typed_elems[idx]
def __eq__(self, other):
if isinstance(other, ArrayWrapper):
return self.for_json() == other.for_json()
else:
return self.for_json() == other
def __init__(self, ary):
"""Initialize a wrapper for the array
Args:
ary: (list-like, or ArrayWrapper)
"""
""" Marks whether or not the underlying array has been modified """
self._dirty = True
""" Holds a typed copy of the array """
self._typed = None
if isinstance(ary, (list, tuple, collections.abc.Sequence)):
self.data = ary
else:
raise TypeError("Invalid value given to array validator: {0}".format(ary))
logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary))
@property
def typed_elems(self):
logger.debug(fmt("Accessing typed_elems of ArrayWrapper {} ", self))
if self._typed is None or self._dirty is True:
self.validate()
return self._typed
def __repr__(self):
return "<%s=%s>" % (self.__class__.__name__, str(self.data))
@classmethod
def from_json(cls, jsonmsg):
import json
msg = json.loads(jsonmsg)
obj = cls(msg)
obj.validate()
return obj
def serialize(self):
enc = util.ProtocolJSONEncoder()
return enc.encode(self.typed_elems)
def for_json(self):
from python_jsonschema_objects import classbuilder
out = []
for item in self.typed_elems:
if isinstance(
item,
(classbuilder.ProtocolBase, classbuilder.LiteralValue, ArrayWrapper),
):
out.append(item.for_json())
else:
out.append(item)
return out
def validate(self):
if self.strict or self._dirty:
self.validate_items()
self.validate_length()
self.validate_uniqueness()
return True
def validate_uniqueness(self):
if getattr(self, "uniqueItems", False) is True:
testset = set(repr(item) for item in self.data)
if len(testset) != len(self.data):
raise ValidationError(
"{0} has duplicate elements, but uniqueness required".format(
self.data
)
)
def validate_length(self):
if getattr(self, "minItems", None) is not None:
if len(self.data) < self.minItems:
raise ValidationError(
"{1} has too few elements. Wanted {0}.".format(
self.minItems, self.data
)
)
if getattr(self, "maxItems", None) is not None:
if len(self.data) > self.maxItems:
raise ValidationError(
"{1} has too many elements. Wanted {0}.".format(
self.maxItems, self.data
)
)
def validate_items(self):
"""Validates the items in the backing array, including
performing type validation.
Sets the _typed property and clears the dirty flag as a side effect
Returns:
The typed array
"""
logger.debug(fmt("Validating {}", self))
from python_jsonschema_objects import classbuilder
if self.__itemtype__ is None:
return
type_checks = self.__itemtype__
if not isinstance(type_checks, (tuple, list)):
# we were given items = {'type': 'blah'} ; thus ensure the type for all data.
type_checks = [type_checks] * len(self.data)
elif len(type_checks) > len(self.data):
raise ValidationError(
"{1} does not have sufficient elements to validate against {0}".format(
self.__itemtype__, self.data
)
)
typed_elems = []
for elem, typ in zip(self.data, type_checks):
if isinstance(typ, dict):
for param, paramval in six.iteritems(typ):
validator = registry(param)
if validator is not None:
validator(paramval, elem, typ)
typed_elems.append(elem)
elif util.safe_issubclass(typ, classbuilder.LiteralValue):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, classbuilder.ProtocolBase):
if not isinstance(elem, typ):
try:
if isinstance(
elem, (six.string_types, six.integer_types, float)
):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError(
"'{0}' is not a valid value for '{1}': {2}".format(
elem, typ, e
)
)
else:
val = elem
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, ArrayWrapper):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError(
"'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e)
)
else:
val.validate()
typed_elems.append(val)
self._dirty = False
self._typed = typed_elems
return typed_elems
@staticmethod
def create(name, item_constraint=None, **addl_constraints):
"""Create an array validator based on the passed in constraints.
If item_constraint is a tuple, it is assumed that tuple validation
is being performed. If it is a class or dictionary, list validation
will be performed. Classes are assumed to be subclasses of ProtocolBase,
while dictionaries are expected to be basic types ('string', 'number', ...).
addl_constraints is expected to be key-value pairs of any of the other
constraints permitted by JSON Schema v4.
"""
logger.debug(
fmt(
"Constructing ArrayValidator with {} and {}",
item_constraint,
addl_constraints,
)
)
from python_jsonschema_objects import classbuilder
klassbuilder = addl_constraints.pop(
"classbuilder", None
) # type: python_jsonschema_objects.classbuilder.ClassBuilder
props = {}
if item_constraint is not None:
if isinstance(item_constraint, (tuple, list)):
for i, elem in enumerate(item_constraint):
isdict = isinstance(elem, (dict,))
isklass = isinstance(elem, type) and util.safe_issubclass(
elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue)
)
if not any([isdict, isklass]):
raise TypeError(
"Item constraint (position {0}) is not a schema".format(i)
)
elif isinstance(
item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef)
):
pass
elif util.safe_issubclass(item_constraint, ArrayWrapper):
pass
else:
isdict = isinstance(item_constraint, (dict,))
isklass = isinstance(item_constraint, type) and util.safe_issubclass(
item_constraint,
(classbuilder.ProtocolBase, classbuilder.LiteralValue),
)
if not any([isdict, isklass]):
raise TypeError("Item constraint is not a schema")
if isdict and "$ref" in item_constraint:
if klassbuilder is None:
raise TypeError(
"Cannot resolve {0} without classbuilder".format(
item_constraint["$ref"]
)
)
item_constraint = klassbuilder.resolve_type(
item_constraint["$ref"], name
)
elif isdict and item_constraint.get("type") == "array":
# We need to create a sub-array validator.
item_constraint = ArrayWrapper.create(
name + "#sub",
item_constraint=item_constraint["items"],
addl_constraints=item_constraint,
)
elif isdict and "oneOf" in item_constraint:
# We need to create a TypeProxy validator
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
type_array = klassbuilder.construct_objects(
item_constraint["oneOf"], uri
)
item_constraint = classbuilder.TypeProxy(type_array)
elif isdict and item_constraint.get("type") == "object":
""" We need to create a ProtocolBase object for this anonymous definition"""
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
item_constraint = klassbuilder.construct(uri, item_constraint)
props["__itemtype__"] = item_constraint
strict = addl_constraints.pop("strict", False)
props["_strict_"] = strict
props.update(addl_constraints)
validator = type(str(name), (ArrayWrapper,), props)
return validator
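# Hedged usage sketch (names and constraints are hypothetical): create() builds
# a new ArrayWrapper subclass; instances re-validate lazily whenever the
# backing list has been marked dirty.
#
#   >>> StringArray = ArrayWrapper.create(
#   ...     'StringArray', item_constraint={'type': 'string'},
#   ...     minItems=1, uniqueItems=True)
#   >>> arr = StringArray(['a', 'b'])
#   >>> arr.validate()
#   True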
| mit | 6,283,899,650,825,311,000 | 34.343558 | 96 | 0.518486 | false |
franciscogmm/FinancialAnalysisUsingNLPandMachineLearning | SentimentAnalysis - Polarity - Domain Specific Lexicon.py | 1 | 2667 |
import csv
import pandas as pd
import nltk
from nltk import FreqDist,ngrams
from nltk.corpus import stopwords
import string
from os import listdir
from os.path import isfile, join
def ngram_list(file,n):
f = open(file,'rU')
raw = f.read()
raw = raw.replace('\n',' ')
#raw = raw.decode('utf8')
#raw = raw.decode("utf-8", 'ignore')
ngramz = ngrams(raw.split(),n)
return ngramz
def IsNotNull(value):
return value is not None and len(value) > 0
mypath = '/Users/francis/Documents/FORDHAM/2nd Term/Text Analytics/' #path where files are located
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
dict_p = []
f = open('positive.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_p.append(t)
f.close()
dict_n = []
f = open('negative.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_n.append(t)
f.close()
totallist = []
rowlist = []
qa = 0
qb = 0
counti = 0
for i in onlyfiles:
if i.endswith('.txt'):
# get code
j = i.replace('.txt','')
# string filename
file = mypath + str(i)
print i
f = open(file,'rU')
raw = f.read()
#print type(raw)
raw = [w.translate(None, string.punctuation) for w in raw]
raw = ''.join(raw)
raw = raw.replace('\n','')
raw = raw.replace(' ','')
#print raw
qa = 0
qb = 0
for word in dict_p:
if word in raw:
qa += 1
for word in dict_n:
if word in raw:
qb += 1
qc = qa - qb
if qc > 0:
sentiment = 'POSITIVE'
elif qc == 0:
sentiment = 'NEUTRAL'
else:
sentiment = 'NEGATIVE'
rowlist.append(i)
rowlist.append(qa)
rowlist.append(qb)
rowlist.append(qc)
rowlist.append(sentiment)
print counti
counti += 1
totallist.append(rowlist)
rowlist = []
else:
pass
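# Worked example of the scoring rule above (counts are illustrative): a
# document with qa = 5 positive-lexicon hits and qb = 3 negative-lexicon hits
# gets NET = qa - qb = 2 > 0 and is labelled POSITIVE; NET == 0 gives NEUTRAL
# and NET < 0 gives NEGATIVE.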
labels = ('file', 'P', 'N', 'NET', 'SENTIMENT')
df = pd.DataFrame.from_records(totallist, columns = labels)
df.to_csv('oursentiment.csv', index = False)
#print dict_p
# allbigrams.append(ngram_list(file,2))
# print i + ' BIGRAM - OK'
# alltrigrams.append(ngram_list(file,3))
# print i + ' TRIGRAM - OK'
# allfourgrams.append(ngram_list(file,4))
# print i + ' FOURGRAM - OK'
# allfivegrams.append(ngram_list(file,5))
# print i + ' TRIGRAM - OK'
# allsixgrams.append(ngram_list(file,6))
# print i + ' SIXGRAM - OK'
# allsevengrams.append(ngram_list(file,7))
# print i + ' SEVENGRAM - OK'
# alleightgrams.append(ngram_list(file,8))
# print i + ' EIGHTGRAM - OK'
| mit | 7,485,374,827,431,947,000 | 21.420168 | 98 | 0.578178 | false |
sassoftware/mint | mint/django_rest/rbuilder/querysets/views/v1/views.py | 1 | 8001 |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django import http
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from mint.django_rest.deco import return_xml, requires, access, xObjRequires
from mint.django_rest.rbuilder import service
# from mint.django_rest.rbuilder.querysets import models
from mint.django_rest.rbuilder.rbac.rbacauth import rbac, manual_rbac
from mint.django_rest.rbuilder.errors import PermissionDenied
from mint.django_rest.rbuilder.rbac.manager.rbacmanager import \
READSET, MODSETDEF
def rbac_can_read_queryset(view, request, query_set_id, *args, **kwargs):
obj = view.mgr.getQuerySet(query_set_id)
if obj.is_public:
# existence of querysets like "All Systems", etc., is not stealthed.
# but may vary in size depending on the user accessing them's permissions
# (ReadMember) on their contents.
return True
user = view.mgr.getSessionInfo().user[0]
ok = view.mgr.userHasRbacPermission(user, obj, READSET)
return ok
def rbac_can_write_queryset(view, request, query_set_id, *args, **kwargs):
obj = view.mgr.getQuerySet(query_set_id)
user = view.mgr.getSessionInfo().user[0]
return view.mgr.userHasRbacPermission(user, obj, MODSETDEF)
class BaseQuerySetService(service.BaseService):
pass
class QuerySetsService(BaseQuerySetService):
# rbac is handled semi-manually for this function -- show only
# querysets that we have permission to see
# but don't use full rbac code, because that is implemented using querysets
# and is too meta.
@access.authenticated
@return_xml
def rest_GET(self, request):
user = request._authUser
querysets = self.mgr.getQuerySets()
return self.mgr.filterRbacQuerysets(user, querysets, request)
# not used above, but still needed by load_from_href and other
# functions
def get(self):
return self.mgr.getQuerySets()
@access.admin
@requires('query_set', load=True, save=True)
@return_xml
def rest_POST(self, request, query_set):
return self.mgr.addQuerySet(query_set, request._authUser)
class QuerySetService(BaseQuerySetService):
# rbac is handled semi-manually for this function -- show only
# querysets that we have permission to see
# but don't use full rbac code, because that is implemented using querysets
# and is too meta.
@rbac(manual_rbac)
@return_xml
def rest_GET(self, request, query_set_id):
user = request._authUser
queryset = self.mgr.getQuerySet(query_set_id)
if not queryset.is_public and not self.mgr.userHasRbacPermission(
user, queryset, READSET, request
):
raise PermissionDenied()
return queryset
# not used above, but still needed by load_from_href and other
# functions
def get(self, query_set_id):
return self.mgr.getQuerySet(query_set_id)
@access.admin
@requires('query_set')
@return_xml
def rest_PUT(self, request, query_set_id, query_set):
oldQuerySet = self.mgr.getQuerySet(query_set_id)
if oldQuerySet.pk != query_set.pk:
raise PermissionDenied(msg='Attempting to reassign ID')
return self.mgr.updateQuerySet(query_set, request._authUser)
@access.admin
def rest_DELETE(self, request, query_set_id):
querySet = self.mgr.getQuerySet(query_set_id)
self.mgr.deleteQuerySet(querySet)
response = http.HttpResponse(status=204)
return response
class QuerySetAllResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetAllResult(query_set_id, for_user=request._authUser)
class QuerySetUniverseResultService(BaseQuerySetService):
'''the parent queryset of all objects of a given type'''
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
self.mgr.getQuerySetUniverseSet(query_set_id)
url = reverse('QuerySetAllResult', args=[query_set_id])
return HttpResponseRedirect(url)
class QuerySetChosenResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetChosenResult(query_set_id, for_user=request._authUser)
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['systems', 'users', 'images', 'targets', 'project_branch_stages', 'projects', 'grants', 'roles'])
@return_xml
def rest_PUT(self, request, query_set_id, *args, **kwargs):
resources = kwargs.items()[0][1]
return self.mgr.addQuerySetChosen(query_set_id, resources, request._authUser)
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role'])
@return_xml
def rest_POST(self, request, query_set_id, *args, **kwargs):
resource = kwargs.items()[0][1]
self.mgr.updateQuerySetChosen(query_set_id, resource, request._authUser)
return resource
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role'])
@return_xml
def rest_DELETE(self, request, query_set_id, *args, **kwargs):
resource = kwargs.items()[0][1]
return self.mgr.deleteQuerySetChosen(query_set_id, resource, request._authUser)
class QuerySetFilteredResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetFilteredResult(query_set_id, for_user=request._authUser)
class QuerySetChildResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
if rbac_can_read_queryset(self, request, query_set_id):
return self.mgr.getQuerySetChildResult(query_set_id)
else:
return self.mgr.getQuerySetChildResult(query_set_id, for_user=request._authUser)
# this is not expected to be our final API for removing child members
# but serves as a temporary one in case someone needs it. Deleting
# the queryset is not an option to clear it out because associated
# grants would be purged.
@rbac(rbac_can_write_queryset)
@requires('query_set')
@return_xml
def rest_DELETE(self, request, query_set_id, query_set):
return self.mgr.deleteQuerySetChild(query_set_id, query_set, for_user=request._authUser)
class QuerySetJobsService(BaseQuerySetService):
# no way to list running jobs at the moment
# since all jobs run immediately
@rbac(rbac_can_read_queryset)
@xObjRequires('job')
def rest_POST(self, request, query_set_id, job):
'''launch a job on this queryset'''
queryset = self.mgr.getQuerySet(query_set_id)
self.mgr.scheduleQuerySetJobAction(
queryset, job
)
return http.HttpResponse(status=200)
class QuerySetFilterDescriptorService(BaseQuerySetService):
# @access.authenticated
@return_xml
def rest_GET(self, request, query_set_id=None):
return self.mgr.getQuerySetFilterDescriptor(query_set_id)
| apache-2.0 | -5,749,486,631,042,150,000 | 36.56338 | 122 | 0.699038 | false |
Anonymike/pasta-bot | plugins/google_broken.py | 1 | 3457 | import random
from util import hook, http, text, database, web
import re
def api_get(kind, query):
"""Use the RESTful Google Search API"""
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=off'
return http.get_json(url % kind, q=query)
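# Illustrative shape of the parsed JSON this helper returns (inferred from how the
# commands below read it; the exact field set is an assumption and may vary):
#   parsed = api_get('web', 'some query')
#   parsed['responseStatus']                # e.g. 200
#   parsed['responseData']['results'][0]    # dict with 'titleNoFormatting',
#                                           # 'content', 'unescapedUrl'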
@hook.command('search')
@hook.command('g')
@hook.command
def google(inp, db=None, chan=None):
"""google <query> -- Returns first google search result for <query>."""
trimlength = database.get(db,'channels','trimlength','chan',chan)
if not trimlength: trimlength = 9999
parsed = api_get('web', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'No results found.'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
content = http.unescape(result['content'])
if not content: content = "No description available."
else: content = http.html.fromstring(content.replace('\n', '')).text_content()
return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
# @hook.command('image')
@hook.command('gis')
@hook.command('gi')
@hook.command('image')
@hook.command
def googleimage(inp):
"""gis <query> -- Returns first Google Image result for <query>."""
parsed = api_get('images', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
@hook.command
def gcalc(inp):
"gcalc <term> -- Calculate <term> with Google Calc."
soup = http.get_soup('http://www.google.com/search', q=inp)
result = soup.find('span', {'class': 'cwcot'})
formula = soup.find('span', {'class': 'cwclet'})
if not result:
return "Could not calculate '{}'".format(inp)
return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip())
@hook.regex(r'^\>(.*\.(gif|GIF|jpg|JPG|jpeg|JPEG|png|PNG|tiff|TIFF|bmp|BMP))\s?(\d+)?')
@hook.command
def implying(inp):
""">laughing girls.gif <num> -- Returns first Google Image result for <query>."""
try: search = inp.group(1)
except: search = inp
try: num = int(inp.group(3))
except: num = 0
if 'http' in search: return
parsed = api_get('images', search)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
try: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][num]['unescapedUrl'])
except: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][0]['unescapedUrl'])
#return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
@hook.command('nym')
@hook.command('littleanon')
@hook.command('gfy')
@hook.command
def lmgtfy(inp, bot=None):
"lmgtfy [phrase] - Posts a google link for the specified phrase"
link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)
try:
return web.isgd(link)
except (web.ShortenError, http.HTTPError):
return link
| gpl-3.0 | 2,788,865,380,336,183,000 | 33.919192 | 119 | 0.639283 | false |
Himon-SYNCRAFT/taskplus | tests/core/actions/test_get_task_status_details.py | 1 | 3408 | from unittest import mock
from taskplus.core.actions import (GetTaskStatusDetailsAction,
GetTaskStatusDetailsRequest)
from taskplus.core.domain import TaskStatus
from taskplus.core.shared.response import ResponseFailure
def test_get_status_details_action():
    status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status.id)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is True
statuses_repo.one.assert_called_once_with(status.id)
assert response.value == status
def test_get_status_details_action_with_hooks():
    status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status.id)
action = GetTaskStatusDetailsAction(statuses_repo)
before = mock.MagicMock()
after = mock.MagicMock()
action.add_before_execution_hook(before)
action.add_after_execution_hook(after)
response = action.execute(request)
assert before.called
assert after.called
assert bool(response) is True
statuses_repo.one.assert_called_once_with(status.id)
assert response.value == status
def test_get_status_details_action_handles_bad_request():
    status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status_id=None)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is False
assert not statuses_repo.one.called
assert response.value == {
'type': ResponseFailure.PARAMETER_ERROR,
'message': 'status_id: is required'
}
def test_get_status_details_action_handles_generic_error():
error_message = 'Error!!!'
statuses_repo = mock.Mock()
statuses_repo.one.side_effect = Exception(error_message)
request = GetTaskStatusDetailsRequest(status_id=1)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is False
statuses_repo.one.assert_called_once_with(1)
assert response.value == {
'type': ResponseFailure.SYSTEM_ERROR,
'message': 'Exception: {}'.format(error_message)
}
def test_get_status_details_request():
status_id = 1
request = GetTaskStatusDetailsRequest(status_id)
assert request.is_valid()
assert request.status_id == status_id
def test_get_status_details_request_without_id():
status_id = None
request = GetTaskStatusDetailsRequest(status_id)
assert not request.is_valid()
assert request.status_id == status_id
assert len(request.errors) == 1
error = request.errors[0]
assert error.parameter == 'status_id'
assert error.message == 'is required'
def test_get_status_details_bad_request():
status_id = 'asd'
request = GetTaskStatusDetailsRequest(status_id)
assert not request.is_valid()
assert request.status_id == status_id
assert len(request.errors) == 1
error = request.errors[0]
assert error.parameter == 'status_id'
assert error.message == 'expected int, got str(asd)'
| bsd-3-clause | -939,071,211,444,209,800 | 29.159292 | 63 | 0.701585 | false |
codeforamerica/comport | migrations/versions/0d78d545906f_.py | 1 | 1135 | """Add 'is_public' flags for datasets
Revision ID: 0d78d545906f
Revises: 6d30846080b2
Create Date: 2016-06-27 15:30:14.415519
"""
# revision identifiers, used by Alembic.
revision = '0d78d545906f'
down_revision = '6d30846080b2'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('departments', sa.Column('is_public_assaults_on_officers', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_citizen_complaints', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_officer_involved_shootings', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_use_of_force_incidents', sa.Boolean(), server_default=sa.true(), nullable=False))
def downgrade():
op.drop_column('departments', 'is_public_use_of_force_incidents')
op.drop_column('departments', 'is_public_officer_involved_shootings')
op.drop_column('departments', 'is_public_citizen_complaints')
op.drop_column('departments', 'is_public_assaults_on_officers')
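# Usage sketch (assuming a standard Alembic workflow for this project; the exact
# wrapper command, e.g. via Flask-Migrate, may differ):
#   alembic upgrade 0d78d545906f      # apply this revision
#   alembic downgrade 6d30846080b2    # revert to the previous revision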
| bsd-3-clause | -5,945,847,998,224,271,000 | 39.535714 | 139 | 0.732159 | false |
forseti-security/forseti-security | google/cloud/forseti/common/gcp_api/admin_directory.py | 1 | 10459 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Admin Directory API client."""
from builtins import object
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.auth.exceptions import RefreshError
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
API_NAME = 'admin'
REQUIRED_SCOPES = frozenset([
'https://www.googleapis.com/auth/admin.directory.group.readonly',
'https://www.googleapis.com/auth/admin.directory.user.readonly'
])
GSUITE_AUTH_FAILURE_MESSAGE = (
'Failed to retrieve G Suite data due to authentication '
'failure. Please make sure your forseti_server_config.yaml '
'file contains the most updated information and enable G '
'Suite Groups Collection if you haven\'t done so. Instructions'
' on how to enable: https://forsetisecurity.org/docs/latest/'
'configure/inventory/gsuite.html')
class AdminDirectoryRepositoryClient(_base_repository.BaseRepositoryClient):
"""Admin Directory API Respository Client."""
def __init__(self,
credentials,
quota_max_calls=None,
quota_period=1.0,
use_rate_limiter=True,
cache_discovery=False,
cache=None):
"""Constructor.
Args:
credentials (object): An google.auth credentials object. The admin
directory API needs a service account credential with delegated
super admin role.
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
cache_discovery (bool): When set to true, googleapiclient will cache
HTTP requests to API discovery endpoints.
cache (googleapiclient.discovery_cache.base.Cache): instance of a
class that can cache API discovery documents. If None,
googleapiclient will attempt to choose a default.
"""
if not quota_max_calls:
use_rate_limiter = False
self._groups = None
self._members = None
self._users = None
super(AdminDirectoryRepositoryClient, self).__init__(
API_NAME, versions=['directory_v1'],
credentials=credentials,
quota_max_calls=quota_max_calls,
quota_period=quota_period,
use_rate_limiter=use_rate_limiter,
cache_discovery=cache_discovery,
cache=cache)
# Turn off docstrings for properties.
# pylint: disable=missing-return-doc, missing-return-type-doc
@property
def groups(self):
"""Returns an _AdminDirectoryGroupsRepository instance."""
if not self._groups:
self._groups = self._init_repository(
_AdminDirectoryGroupsRepository)
return self._groups
@property
def members(self):
"""Returns an _AdminDirectoryMembersRepository instance."""
if not self._members:
self._members = self._init_repository(
_AdminDirectoryMembersRepository)
return self._members
@property
def users(self):
"""Returns an _AdminDirectoryUsersRepository instance."""
if not self._users:
self._users = self._init_repository(
_AdminDirectoryUsersRepository)
return self._users
# pylint: enable=missing-return-doc, missing-return-type-doc
class _AdminDirectoryGroupsRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Groups repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryGroupsRepository, self).__init__(
key_field='', component='groups', **kwargs)
class _AdminDirectoryMembersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Members repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryMembersRepository, self).__init__(
key_field='groupKey', component='members', **kwargs)
class _AdminDirectoryUsersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Users repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryUsersRepository, self).__init__(
key_field='', component='users', **kwargs)
class AdminDirectoryClient(object):
"""GSuite Admin Directory API Client."""
def __init__(self, global_configs, **kwargs):
"""Initialize.
Args:
global_configs (dict): Global configurations.
**kwargs (dict): The kwargs.
"""
credentials = api_helpers.get_delegated_credential(
global_configs.get('domain_super_admin_email'),
REQUIRED_SCOPES)
max_calls, quota_period = api_helpers.get_ratelimiter_config(
global_configs, API_NAME)
cache_discovery = global_configs[
'cache_discovery'] if 'cache_discovery' in global_configs else False
self.repository = AdminDirectoryRepositoryClient(
credentials=credentials,
quota_max_calls=max_calls,
quota_period=quota_period,
use_rate_limiter=kwargs.get('use_rate_limiter', True),
cache_discovery=cache_discovery,
cache=global_configs.get('cache'))
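    # Minimal usage sketch (hypothetical config values; requires a delegated
    # G Suite super-admin credential with the scopes listed in REQUIRED_SCOPES):
    #   client = AdminDirectoryClient({'domain_super_admin_email': 'admin@example.com',
    #                                  'cache_discovery': False})
    #   groups = client.get_groups()
    #   users = client.get_users()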
def get_group_members(self, group_key):
"""Get all the members for specified groups.
Args:
group_key (str): The group's unique id assigned by the Admin API.
Returns:
list: A list of member objects from the API.
Raises:
api_errors.ApiExecutionError: If group member retrieval fails.
"""
try:
paged_results = self.repository.members.list(group_key)
result = api_helpers.flatten_list_results(paged_results, 'members')
LOGGER.debug('Getting all the members for group_key = %s,'
' result = %s', group_key, result)
return result
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(group_key, e)
def get_groups(self, customer_id='my_customer'):
"""Get all the groups for a given customer_id.
A note on customer_id='my_customer'. This is a magic string instead
of using the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of group objects returned from the API.
Raises:
api_errors.ApiExecutionError: If groups retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.groups.list(customer=customer_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'groups')
LOGGER.debug('Getting all the groups for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('groups', e)
def get_users(self, customer_id='my_customer'):
"""Get all the users for a given customer_id.
A note on customer_id='my_customer'. This is a magic string instead
of using the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of user objects returned from the API.
Raises:
api_errors.ApiExecutionError: If groups retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.users.list(customer=customer_id,
viewType='admin_view')
flattened_results = api_helpers.flatten_list_results(
paged_results, 'users')
LOGGER.debug('Getting all the users for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('users', e)
| apache-2.0 | -5,565,374,687,981,949,000 | 37.032727 | 103 | 0.627115 | false |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/dummycoding.py | 1 | 2450 | import setpath
import functions
import json
import collections
registered=True
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
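# For illustration: convert() recursively coerces unicode text to Python 2 byte
# strings while preserving container types, e.g.
#   convert({u'name': [u'value', 3]})  ->  {'name': ['value', 3]}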
class dummycoding(functions.vtable.vtbase.VT):
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ")
query = dictargs['query']
if 'metadata' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No metadata ")
metadata = json.loads(dictargs['metadata'])
cur = envars['db'].cursor()
        c = cur.execute(query)
schema = cur.getdescriptionsafe()
no = 0
for myrow in c:
first_tuple = []
schema1 = []
for item in xrange(len(schema)):
if schema[item][0] in metadata:
vals = metadata[schema[item][0]].split(',')
vals.sort()
for v in vals:
newv = str(schema[item][0]) + '(' + str(v) + ')'
schema1.append(newv)
if myrow[item] == v:
first_tuple.append(1)
else :
first_tuple.append(0)
else:
# print 'no', schema[item][0]
newv = str(schema[item][0])
schema1.append(newv)
first_tuple.append(myrow[item])
if no == 0:
# print tuple((x,) for x in schema1)
yield tuple((x,) for x in schema1)
            no = no + 1
# print str(first_tuple)
yield tuple(first_tuple,)
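# Illustrative behaviour (hypothetical column/metadata values): for a categorical
# column "gender" with metadata {"gender": "female,male"}, every input row is
# expanded into one 0/1 column per category, e.g. gender='male' becomes
# gender(female)=0, gender(male)=1; columns absent from the metadata pass through.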
def Source():
return functions.vtable.vtbase.VTGenerator(dummycoding)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
    doctest.testmod() | mit | -3,340,337,105,526,376,000 | 29.259259 | 88 | 0.517959 | false |
matrix-org/synapse | tests/replication/test_sharded_event_persister.py | 1 | 12377 | # Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest.mock import patch
from synapse.api.room_versions import RoomVersion
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from synapse.rest.client.v2_alpha import sync
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import make_request
from tests.utils import USE_POSTGRES_FOR_TESTS
logger = logging.getLogger(__name__)
class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
"""Checks event persisting sharding works"""
# Event persister sharding requires postgres (due to needing
# `MultiWriterIdGenerator`).
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor, clock, hs):
# Register a user who sends a message that we'll get notified about
self.other_user_id = self.register_user("otheruser", "pass")
self.other_access_token = self.login("otheruser", "pass")
self.room_creator = self.hs.get_room_creation_handler()
self.store = hs.get_datastore()
def default_config(self):
conf = super().default_config()
conf["redis"] = {"enabled": "true"}
conf["stream_writers"] = {"events": ["worker1", "worker2"]}
conf["instance_map"] = {
"worker1": {"host": "testserv", "port": 1001},
"worker2": {"host": "testserv", "port": 1002},
}
return conf
def _create_room(self, room_id: str, user_id: str, tok: str):
"""Create a room with given room_id"""
# We control the room ID generation by patching out the
# `_generate_room_id` method
async def generate_room(
creator_id: str, is_public: bool, room_version: RoomVersion
):
await self.store.store_room(
room_id=room_id,
room_creator_user_id=creator_id,
is_public=is_public,
room_version=room_version,
)
return room_id
with patch(
"synapse.handlers.room.RoomCreationHandler._generate_room_id"
) as mock:
mock.side_effect = generate_room
self.helper.create_room_as(user_id, tok=tok)
def test_basic(self):
"""Simple test to ensure that multiple rooms can be created and joined,
and that different rooms get handled by different instances.
"""
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker1"},
)
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker2"},
)
persisted_on_1 = False
persisted_on_2 = False
store = self.hs.get_datastore()
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
# Keep making new rooms until we see rooms being persisted on both
# workers.
for _ in range(10):
# Create a room
room = self.helper.create_room_as(user_id, tok=access_token)
# The other user joins
self.helper.join(
room=room, user=self.other_user_id, tok=self.other_access_token
)
# The other user sends some messages
            response = self.helper.send(room, body="Hi!", tok=self.other_access_token)
            event_id = response["event_id"]
# The event position includes which instance persisted the event.
pos = self.get_success(store.get_position_for_event(event_id))
persisted_on_1 |= pos.instance_name == "worker1"
persisted_on_2 |= pos.instance_name == "worker2"
if persisted_on_1 and persisted_on_2:
break
self.assertTrue(persisted_on_1)
self.assertTrue(persisted_on_2)
def test_vector_clock_token(self):
"""Tests that using a stream token with a vector clock component works
correctly with basic /sync and /messages usage.
"""
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker1"},
)
worker_hs2 = self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker2"},
)
sync_hs = self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "sync"},
)
sync_hs_site = self._hs_to_site[sync_hs]
# Specially selected room IDs that get persisted on different workers.
room_id1 = "!foo:test"
room_id2 = "!baz:test"
self.assertEqual(
self.hs.config.worker.events_shard_config.get_instance(room_id1), "worker1"
)
self.assertEqual(
self.hs.config.worker.events_shard_config.get_instance(room_id2), "worker2"
)
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
store = self.hs.get_datastore()
# Create two room on the different workers.
self._create_room(room_id1, user_id, access_token)
self._create_room(room_id2, user_id, access_token)
# The other user joins
self.helper.join(
room=room_id1, user=self.other_user_id, tok=self.other_access_token
)
self.helper.join(
room=room_id2, user=self.other_user_id, tok=self.other_access_token
)
# Do an initial sync so that we're up to date.
channel = make_request(
self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
)
next_batch = channel.json_body["next_batch"]
# We now gut wrench into the events stream MultiWriterIdGenerator on
# worker2 to mimic it getting stuck persisting an event. This ensures
# that when we send an event on worker1 we end up in a state where
# worker2 events stream position lags that on worker1, resulting in a
# RoomStreamToken with a non-empty instance map component.
#
# Worker2's event stream position will not advance until we call
# __aexit__ again.
actx = worker_hs2.get_datastore()._stream_id_gen.get_next()
self.get_success(actx.__aenter__())
response = self.helper.send(room_id1, body="Hi!", tok=self.other_access_token)
first_event_in_room1 = response["event_id"]
# Assert that the current stream token has an instance map component, as
# we are trying to test vector clock tokens.
room_stream_token = store.get_room_max_token()
self.assertNotEqual(len(room_stream_token.instance_map), 0)
# Check that syncing still gets the new event, despite the gap in the
# stream IDs.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(next_batch),
access_token=access_token,
)
# We should only see the new event and nothing else
self.assertIn(room_id1, channel.json_body["rooms"]["join"])
self.assertNotIn(room_id2, channel.json_body["rooms"]["join"])
events = channel.json_body["rooms"]["join"][room_id1]["timeline"]["events"]
self.assertListEqual(
[first_event_in_room1], [event["event_id"] for event in events]
)
# Get the next batch and makes sure its a vector clock style token.
vector_clock_token = channel.json_body["next_batch"]
self.assertTrue(vector_clock_token.startswith("m"))
# Now that we've got a vector clock token we finish the fake persisting
# an event we started above.
self.get_success(actx.__aexit__(None, None, None))
        # Now try and send an event to the other room so that we can test that
# the vector clock style token works as a `since` token.
response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token)
first_event_in_room2 = response["event_id"]
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(vector_clock_token),
access_token=access_token,
)
self.assertNotIn(room_id1, channel.json_body["rooms"]["join"])
self.assertIn(room_id2, channel.json_body["rooms"]["join"])
events = channel.json_body["rooms"]["join"][room_id2]["timeline"]["events"]
self.assertListEqual(
[first_event_in_room2], [event["event_id"] for event in events]
)
next_batch = channel.json_body["next_batch"]
# We also want to test that the vector clock style token works with
# pagination. We do this by sending a couple of new events into the room
# and syncing again to get a prev_batch token for each room, then
# paginating from there back to the vector clock token.
self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token)
self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token)
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(next_batch),
access_token=access_token,
)
prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][
"prev_batch"
]
prev_batch2 = channel.json_body["rooms"]["join"][room_id2]["timeline"][
"prev_batch"
]
# Paginating back in the first room should not produce any results, as
# no events have happened in it. This tests that we are correctly
# filtering results based on the vector clock portion.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=b".format(
room_id1, prev_batch1, vector_clock_token
),
access_token=access_token,
)
self.assertListEqual([], channel.json_body["chunk"])
# Paginating back on the second room should produce the first event
# again. This tests that pagination isn't completely broken.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=b".format(
room_id2, prev_batch2, vector_clock_token
),
access_token=access_token,
)
self.assertEqual(len(channel.json_body["chunk"]), 1)
self.assertEqual(
channel.json_body["chunk"][0]["event_id"], first_event_in_room2
)
# Paginating forwards should give the same results
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=f".format(
room_id1, vector_clock_token, prev_batch1
),
access_token=access_token,
)
self.assertListEqual([], channel.json_body["chunk"])
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=f".format(
room_id2,
vector_clock_token,
prev_batch2,
),
access_token=access_token,
)
self.assertEqual(len(channel.json_body["chunk"]), 1)
self.assertEqual(
channel.json_body["chunk"][0]["event_id"], first_event_in_room2
)
| apache-2.0 | -5,067,781,765,285,621,000 | 35.83631 | 87 | 0.592874 | false |
gnarula/eden_deployment | modules/s3db/msg.py | 1 | 88933 | # -*- coding: utf-8 -*-
""" Sahana Eden Messaging Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ChannelModel",
"S3MessageModel",
"S3MessageAttachmentModel",
"S3EmailModel",
"S3FacebookModel",
"S3MCommonsModel",
"S3ParsingModel",
"S3RSSModel",
"S3SMSModel",
"S3SMSOutboundModel",
"S3TropoModel",
"S3TwilioModel",
"S3TwitterModel",
"S3TwitterSearchModel",
"S3XFormsModel",
"S3BaseStationModel",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3ChannelModel(S3Model):
"""
Messaging Channels
- all Inbound & Outbound channels for messages are instances of this
super-entity
"""
names = ("msg_channel",
"msg_channel_limit",
"msg_channel_status",
"msg_channel_id",
"msg_channel_enable",
"msg_channel_disable",
"msg_channel_enable_interactive",
"msg_channel_disable_interactive",
"msg_channel_onaccept",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
#----------------------------------------------------------------------
# Super entity: msg_channel
#
channel_types = Storage(msg_email_channel = T("Email (Inbound)"),
msg_facebook_channel = T("Facebook"),
msg_mcommons_channel = T("Mobile Commons (Inbound)"),
msg_rss_channel = T("RSS Feed"),
msg_sms_modem_channel = T("SMS Modem"),
msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"),
msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"),
msg_tropo_channel = T("Tropo"),
msg_twilio_channel = T("Twilio (Inbound)"),
msg_twitter_channel = T("Twitter"),
)
tablename = "msg_channel"
self.super_entity(tablename, "channel_id",
channel_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?")
#represent = s3_yes_no_represent,
),
# @ToDo: Indicate whether channel can be used for Inbound or Outbound
#Field("inbound", "boolean",
# label = T("Inbound?")),
#Field("outbound", "boolean",
# label = T("Outbound?")),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
# Reusable Field
channel_id = S3ReusableField("channel_id", "reference %s" % tablename,
label = T("Channel"),
ondelete = "SET NULL",
represent = S3Represent(lookup=tablename),
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_channel.id")),
)
self.add_components(tablename,
msg_channel_status = "channel_id",
)
# ---------------------------------------------------------------------
# Channel Limit
# Used to limit the number of emails sent from the system
# - works by simply recording an entry for the timestamp to be checked against
#
# - currently just used by msg.send_email()
#
tablename = "msg_channel_limit"
define_table(tablename,
# @ToDo: Make it per-channel
#channel_id(),
*s3_timestamp())
# ---------------------------------------------------------------------
# Channel Status
# Used to record errors encountered in the Channel
#
tablename = "msg_channel_status"
define_table(tablename,
channel_id(),
Field("status",
#label = T("Status")
#represent = s3_yes_no_represent,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_channel_id = channel_id,
msg_channel_enable = self.channel_enable,
msg_channel_disable = self.channel_disable,
msg_channel_enable_interactive = self.channel_enable_interactive,
msg_channel_disable_interactive = self.channel_disable_interactive,
msg_channel_onaccept = self.channel_onaccept,
msg_channel_poll = self.channel_poll,
)
# -------------------------------------------------------------------------
@staticmethod
def channel_enable(tablename, channel_id):
"""
Enable a Channel
- Schedule a Poll for new messages
- Enable all associated Parsers
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
# Update Instance
record.update_record(enabled = True)
# Update Super
s3db.update_super(table, record)
# Enable all Parser tasks on this channel
ptable = s3db.msg_parser
query = (ptable.channel_id == channel_id) & \
(ptable.deleted == False)
parsers = db(query).select(ptable.id)
for parser in parsers:
s3db.msg_parser_enable(parser.id)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Channel already enabled"
else:
current.s3task.schedule_task("msg_poll",
args = [tablename, channel_id],
period = 300, # seconds
timeout = 300, # seconds
repeats = 0 # unlimited
)
return "Channel enabled"
# -------------------------------------------------------------------------
@staticmethod
def channel_enable_interactive(r, **attr):
"""
Enable a Channel
- Schedule a Poll for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_enable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -------------------------------------------------------------------------
@staticmethod
def channel_disable(tablename, channel_id):
"""
Disable a Channel
- Remove schedule for Polling for new messages
- Disable all associated Parsers
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
# Update Instance
record.update_record(enabled = False)
# Update Super
s3db.update_super(table, record)
# Disable all Parser tasks on this channel
ptable = s3db.msg_parser
parsers = db(ptable.channel_id == channel_id).select(ptable.id)
for parser in parsers:
s3db.msg_parser_disable(parser.id)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Channel disabled"
else:
return "Channel already disabled"
# --------------------------------------------------------------------------
@staticmethod
def channel_disable_interactive(r, **attr):
"""
Disable a Channel
- Remove schedule for Polling for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_disable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -------------------------------------------------------------------------
@staticmethod
def channel_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
# process of changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_channel_disable(form.table._tablename,
form.vars.channel_id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
# -------------------------------------------------------------------------
@staticmethod
def channel_poll(r, **attr):
"""
Poll a Channel for new messages
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_poll", args=[tablename, r.record.channel_id])
current.session.confirmation = \
current.T("The poll request has been submitted, so new messages should appear shortly - refresh to see them")
if tablename == "msg_email_channel":
fn = "email_inbox"
elif tablename == "msg_mcommons_channel":
fn = "sms_inbox"
elif tablename == "msg_rss_channel":
fn = "rss"
elif tablename == "msg_twilio_channel":
fn = "sms_inbox"
elif tablename == "msg_twitter_channel":
fn = "twitter_inbox"
else:
return "Unsupported channel: %s" % tablename
redirect(URL(f=fn))
# =============================================================================
class S3MessageModel(S3Model):
"""
Messages
"""
names = ("msg_message",
"msg_message_id",
"msg_message_represent",
"msg_outbox",
)
def model(self):
T = current.T
db = current.db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
configure = self.configure
define_table = self.define_table
# Message priority
msg_priority_opts = {3 : T("High"),
2 : T("Medium"),
1 : T("Low"),
}
# ---------------------------------------------------------------------
# Message Super Entity - all Inbound & Outbound Messages
#
message_types = Storage(msg_email = T("Email"),
msg_facebook = T("Facebook"),
msg_rss = T("RSS"),
msg_sms = T("SMS"),
msg_twitter = T("Twitter"),
msg_twitter_result = T("Twitter Search Results"),
)
tablename = "msg_message"
self.super_entity(tablename, "message_id",
message_types,
# Knowing which Channel Incoming Messages
# came in on allows correlation to Outbound
# messages (campaign_message, deployment_alert, etc)
self.msg_channel_id(),
s3_datetime(default="now"),
Field("body", "text",
label = T("Message"),
),
Field("from_address",
label = T("From"),
),
Field("to_address",
label = T("To"),
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
table.instance_type.writable = True
configure(tablename,
list_fields = ["instance_type",
"from_address",
"to_address",
"body",
"inbound",
],
)
# Reusable Field
message_represent = S3Represent(lookup=tablename, fields=["body"])
message_id = S3ReusableField("message_id", "reference %s" % tablename,
ondelete = "RESTRICT",
represent = message_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_message.id")),
)
self.add_components(tablename,
msg_attachment = "message_id",
deploy_response = "message_id",
)
# ---------------------------------------------------------------------
# Outbound Messages
#
# Show only the supported messaging methods
MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS
# Maximum number of retries to send a message
MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries()
# Valid message outbox statuses
MSG_STATUS_OPTS = {1 : T("Unsent"),
2 : T("Sent"),
3 : T("Draft"),
4 : T("Invalid"),
5 : T("Failed"),
}
opt_msg_status = S3ReusableField("status", "integer",
notnull=True,
requires = IS_IN_SET(MSG_STATUS_OPTS,
zero=None),
default = 1,
label = T("Status"),
represent = lambda opt: \
MSG_STATUS_OPTS.get(opt,
UNKNOWN_OPT))
# Outbox - needs to be separate to Message since a single message
# sent needs different outbox entries for each recipient
tablename = "msg_outbox"
define_table(tablename,
# FK not instance
message_id(),
# Person/Group to send the message out to:
self.super_link("pe_id", "pr_pentity"),
# If set used instead of picking up from pe_id:
Field("address"),
Field("contact_method", length=32,
default = "EMAIL",
label = T("Contact Method"),
represent = lambda opt: \
MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(MSG_CONTACT_OPTS,
zero=None),
),
opt_msg_status(),
# Used to loop through a PE to get it's members
Field("system_generated", "boolean",
default = False,
),
# Give up if we can't send after MAX_RETRIES
Field("retries", "integer",
default = MAX_SEND_RETRIES,
readable = False,
writable = False,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["id",
"message_id",
"pe_id",
"status",
],
orderby = "msg_outbox.created_on desc",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(msg_message_id = message_id,
msg_message_represent = message_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(msg_message_id = lambda **attr: dummy("message_id"),
)
# =============================================================================
class S3MessageAttachmentModel(S3Model):
"""
Message Attachments
- link table between msg_message & doc_document
"""
names = ("msg_attachment",)
def model(self):
# ---------------------------------------------------------------------
#
tablename = "msg_attachment"
self.define_table(tablename,
# FK not instance
self.msg_message_id(),
self.doc_document_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3EmailModel(S3ChannelModel):
"""
Email
InBound Channels
Outbound Email is currently handled via deployment_settings
InBox/OutBox
"""
names = ("msg_email_channel",
"msg_email",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Email Inbound Channels
#
tablename = "msg_email_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("server"),
Field("protocol",
requires = IS_IN_SET(["imap", "pop3"],
zero=None),
),
Field("use_ssl", "boolean"),
Field("port", "integer"),
Field("username"),
Field("password", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
# Set true to delete messages from the remote
# inbox after fetching them.
Field("delete_from_server", "boolean"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "email_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "email_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "email_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Email Messages: InBox & Outbox
#
sender = current.deployment_settings.get_mail_sender()
tablename = "msg_email"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now"),
Field("subject", length=78, # RFC 2822
label = T("Subject"),
),
Field("body", "text",
label = T("Message"),
),
Field("from_address", #notnull=True,
default = sender,
label = T("Sender"),
requires = IS_EMAIL(),
),
Field("to_address",
label = T("To"),
requires = IS_EMAIL(),
),
Field("raw", "text",
label = T("Message Source"),
readable = False,
writable = False,
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or [T("Out")])[0],
),
*s3_meta_fields())
configure(tablename,
orderby = "msg_email.date desc",
super_entity = "msg_message",
)
# Components
self.add_components(tablename,
# Used to link to custom tab deploy_response_select_mission:
deploy_mission = {"name": "select",
"link": "deploy_response",
"joinby": "message_id",
"key": "mission_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3FacebookModel(S3ChannelModel):
"""
Facebook
Channels
InBox/OutBox
https://developers.facebook.com/docs/graph-api
"""
names = ("msg_facebook_channel",
"msg_facebook",
"msg_facebook_login",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Facebook Channels
#
tablename = "msg_facebook_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("login", "boolean",
default = False,
label = T("Use for Login?"),
represent = s3_yes_no_represent,
),
Field("app_id", "bigint",
requires = IS_INT_IN_RANGE(0, +1e16)
),
Field("app_secret", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
# Optional
Field("page_id", "bigint",
requires = IS_INT_IN_RANGE(0, +1e16)
),
Field("page_access_token"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_facebook_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "facebook_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "facebook_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
#set_method("msg", "facebook_channel",
# method = "poll",
# action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Facebook Messages: InBox & Outbox
#
tablename = "msg_facebook"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now"),
Field("body", "text",
label = T("Message"),
),
# @ToDo: Are from_address / to_address relevant in Facebook?
Field("from_address", #notnull=True,
#default = sender,
label = T("Sender"),
),
Field("to_address",
label = T("To"),
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or [T("Out")])[0],
),
*s3_meta_fields())
configure(tablename,
orderby = "msg_facebook.date desc",
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict(msg_facebook_login = self.msg_facebook_login,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
return dict(msg_facebook_login = lambda: False,
)
# -------------------------------------------------------------------------
@staticmethod
def msg_facebook_channel_onaccept(form):
if form.vars.login:
# Ensure only a single account used for Login
current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False)
# Normal onaccept processing
S3ChannelModel.channel_onaccept(form)
# -------------------------------------------------------------------------
@staticmethod
def msg_facebook_login():
table = current.s3db.msg_facebook_channel
query = (table.login == True) & \
(table.deleted == False)
c = current.db(query).select(table.app_id,
table.app_secret,
limitby=(0, 1)
).first()
return c
# =============================================================================
class S3MCommonsModel(S3ChannelModel):
"""
Mobile Commons Inbound SMS Settings
- Outbound can use Web API
"""
names = ("msg_mcommons_channel",)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
tablename = "msg_mcommons_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("campaign_id", length=128, unique=True,
requires = IS_NOT_EMPTY(),
),
Field("url",
default = \
"https://secure.mcommons.com/api/messages",
requires = IS_URL()
),
Field("username",
requires = IS_NOT_EMPTY(),
),
Field("password", "password",
readable = False,
requires = IS_NOT_EMPTY(),
),
Field("query"),
Field("timestmp", "datetime",
writable = False,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "mcommons_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "mcommons_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "mcommons_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3ParsingModel(S3Model):
"""
Message Parsing Model
"""
names = ("msg_parser",
"msg_parsing_status",
"msg_session",
"msg_keyword",
"msg_sender",
"msg_parser_enabled",
"msg_parser_enable",
"msg_parser_disable",
"msg_parser_enable_interactive",
"msg_parser_disable_interactive",
)
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
channel_id = self.msg_channel_id
message_id = self.msg_message_id
# ---------------------------------------------------------------------
# Link between Message Channels and Parsers in parser.py
#
tablename = "msg_parser"
define_table(tablename,
# Source
channel_id(ondelete = "CASCADE"),
Field("function_name",
label = T("Parser"),
),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_parser_onaccept,
)
set_method("msg", "parser",
method = "enable",
action = self.parser_enable_interactive)
set_method("msg", "parser",
method = "disable",
action = self.parser_disable_interactive)
set_method("msg", "parser",
method = "parse",
action = self.parser_parse)
# ---------------------------------------------------------------------
# Message parsing status
# - component to core msg_message table
#
tablename = "msg_parsing_status"
define_table(tablename,
# Component, not Instance
message_id(ondelete = "CASCADE"),
# Source
channel_id(ondelete = "CASCADE"),
Field("is_parsed", "boolean",
default = False,
label = T("Parsing Status"),
represent = lambda parsed: \
(parsed and [T("Parsed")] or \
[T("Not Parsed")])[0],
),
message_id("reply_id",
label = T("Reply"),
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Login sessions for Message Parsing
# - links a from_address with a login until expiry
#
tablename = "msg_session"
define_table(tablename,
Field("from_address"),
Field("email"),
Field("created_datetime", "datetime",
default = current.request.utcnow,
),
Field("expiration_time", "integer"),
Field("is_expired", "boolean",
default = False,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Keywords for Message Parsing
#
tablename = "msg_keyword"
define_table(tablename,
Field("keyword",
label = T("Keyword"),
),
# @ToDo: Move this to a link table
self.event_incident_type_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Senders for Message Parsing
# - whitelist / blacklist / prioritise
#
tablename = "msg_sender"
define_table(tablename,
Field("sender",
label = T("Sender"),
),
# @ToDo: Make pe_id work for this
#self.super_link("pe_id", "pr_pentity"),
Field("priority", "integer",
label = T("Priority"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_parser_enabled = self.parser_enabled,
msg_parser_enable = self.parser_enable,
msg_parser_disable = self.parser_disable,
)
# -----------------------------------------------------------------------------
@staticmethod
def parser_parse(r, **attr):
"""
Parse unparsed messages
S3Method for interactive requests
"""
record = r.record
current.s3task.async("msg_parse", args=[record.channel_id, record.function_name])
current.session.confirmation = \
current.T("The parse request has been submitted")
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_enabled(channel_id):
"""
Helper function to see if there is a Parser connected to a Channel
- used to determine whether to populate the msg_parsing_status table
"""
table = current.s3db.msg_parser
record = current.db(table.channel_id == channel_id).select(table.enabled,
limitby=(0, 1),
).first()
if record and record.enabled:
return True
else:
return False
# -------------------------------------------------------------------------
@staticmethod
def parser_enable(id):
"""
Enable a Parser
- Connect a Parser to a Channel
CLI API for shell scripts & to be called by S3Method
@ToDo: Ensure only 1 Parser is connected to any Channel at a time
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
record.update_record(enabled = True)
channel_id = record.channel_id
function_name = record.function_name
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (channel_id, function_name)
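        # e.g. args == '[5, "parse_email"]' (values hypothetical); this string must
        # match scheduler_task.args exactly for the duplicate check below to find an
        # existing task.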
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Parser already enabled"
else:
current.s3task.schedule_task("msg_parse",
args = [channel_id, function_name],
period = 300, # seconds
timeout = 300, # seconds
repeats = 0 # unlimited
)
return "Parser enabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_enable_interactive(r, **attr):
"""
Enable a Parser
- Connect a Parser to a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_enable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_disable(id):
"""
Disable a Parser
- Disconnect a Parser from a Channel
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (record.channel_id, record.function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Parser disabled"
else:
return "Parser already disabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_disable_interactive(r, **attr):
"""
Disable a Parser
- Disconnect a Parser from a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_disable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def msg_parser_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
            # Process only if the flag changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_parser_disable(form.vars.id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_parser_enable(form.vars.id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_parser_enable(form.vars.id)
# =============================================================================
class S3RSSModel(S3ChannelModel):
"""
RSS channel
"""
names = ("msg_rss_channel",
"msg_rss",
)
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# RSS Settings for an account
#
tablename = "msg_rss_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name", length=255, unique=True,
label = T("Name"),
),
Field("description",
label = T("Description"),
),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("url",
label = T("URL"),
requires = IS_URL(),
),
s3_datetime(label = T("Last Polled"),
writable = False,
),
Field("etag",
label = T("ETag"),
writable = False
),
*s3_meta_fields())
self.configure(tablename,
list_fields = ["name",
"description",
"enabled",
"url",
"date",
"channel_status.status",
],
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "rss_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "rss_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "rss_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# RSS Feed Posts
#
tablename = "msg_rss"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now",
label = T("Published on"),
),
Field("title",
label = T("Title"),
),
Field("body", "text",
label = T("Content"),
),
Field("from_address",
label = T("Link"),
),
# http://pythonhosted.org/feedparser/reference-feed-author_detail.html
Field("author",
label = T("Author"),
),
# http://pythonhosted.org/feedparser/reference-entry-tags.html
Field("tags", "list:string",
label = T("Tags"),
),
self.gis_location_id(),
# Just present for Super Entity
Field("inbound", "boolean",
default = True,
readable = False,
writable = False,
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = self.msg_rss_duplicate,
list_fields = ["channel_id",
"title",
"from_address",
"date",
"body"
],
super_entity = current.s3db.msg_message,
)
# ---------------------------------------------------------------------
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_rss_duplicate(item):
"""
Import item deduplication, match by link (from_address)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_rss":
table = item.table
from_address = item.data.get("from_address")
query = (table.from_address == from_address)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3SMSModel(S3Model):
"""
SMS: Short Message Service
These can be received through a number of different gateways
- MCommons
- Modem (@ToDo: Restore this)
- Tropo
- Twilio
"""
names = ("msg_sms",)
def model(self):
#T = current.T
user = current.auth.user
if user and user.organisation_id:
# SMS Messages need to be tagged to their org so that they can be sent through the correct gateway
default = user.organisation_id
else:
default = None
# ---------------------------------------------------------------------
# SMS Messages: InBox & Outbox
#
tablename = "msg_sms"
self.define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
self.org_organisation_id(default = default),
s3_datetime(default="now"),
Field("body", "text",
# Allow multi-part SMS
#length = 160,
#label = T("Message"),
),
Field("from_address",
#label = T("Sender"),
),
Field("to_address",
#label = T("To"),
),
Field("inbound", "boolean",
default = False,
#represent = lambda direction: \
# (direction and [T("In")] or \
# [T("Out")])[0],
#label = T("Direction")),
),
# Used e.g. for Clickatell
Field("remote_id",
#label = T("Remote ID"),
),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3SMSOutboundModel(S3Model):
"""
SMS: Short Message Service
- Outbound Channels
These can be sent through a number of different gateways
- Modem
- SMTP
- Tropo
- Web API (inc Clickatell, MCommons, mVaayoo)
"""
names = ("msg_sms_outbound_gateway",
"msg_sms_modem_channel",
"msg_sms_smtp_channel",
"msg_sms_webapi_channel",
)
def model(self):
#T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# SMS Outbound Gateway
# - select which gateway is in active use for which Organisation/Branch
#
tablename = "msg_sms_outbound_gateway"
define_table(tablename,
self.msg_channel_id(
requires = IS_ONE_OF(current.db, "msg_channel.channel_id",
S3Represent(lookup="msg_channel"),
instance_types = ("msg_sms_modem_channel",
"msg_sms_webapi_channel",
"msg_sms_smtp_channel",
),
sort = True,
),
),
#Field("outgoing_sms_handler", length=32,
# requires = IS_IN_SET(current.msg.GATEWAY_OPTS,
# zero = None),
# ),
# Allow selection of different gateways based on Organisation/Branch
self.org_organisation_id(),
# @ToDo: Allow selection of different gateways based on destination Location
#self.gis_location_id(),
# @ToDo: Allow addition of relevant country code (currently in deployment_settings)
#Field("default_country_code", "integer",
# default = 44),
*s3_meta_fields())
# ---------------------------------------------------------------------
# SMS Modem Channel
#
tablename = "msg_sms_modem_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("modem_port"),
Field("modem_baud", "integer",
default = 115200,
),
Field("enabled", "boolean",
default = True,
),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# SMS via SMTP Channel
#
tablename = "msg_sms_smtp_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("address", length=64,
requires = IS_NOT_EMPTY(),
),
Field("subject", length=64),
Field("enabled", "boolean",
default = True,
),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# Settings for Web API services
#
# @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options
# + Advanced mode for raw access to real fields
#
tablename = "msg_sms_webapi_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("url",
default = "https://api.clickatell.com/http/sendmsg", # Clickatell
#default = "https://secure.mcommons.com/api/send_message", # Mobile Commons
requires = IS_URL(),
),
Field("parameters",
default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell
#default = "campaign_id=yourid", # Mobile Commons
),
Field("message_variable", "string",
default = "text", # Clickatell
#default = "body", # Mobile Commons
requires = IS_NOT_EMPTY(),
),
Field("to_variable", "string",
default = "to", # Clickatell
#default = "phone_number", # Mobile Commons
requires = IS_NOT_EMPTY(),
),
Field("max_length", "integer",
default = 480, # Clickatell concat 3
),
# If using HTTP Auth (e.g. Mobile Commons)
Field("username"),
Field("password"),
Field("enabled", "boolean",
default = True,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TropoModel(S3Model):
"""
Tropo can be used to send & receive SMS, Twitter & XMPP
https://www.tropo.com
"""
names = ("msg_tropo_channel",
"msg_tropo_scratch",
)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Tropo Channels
#
tablename = "msg_tropo_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("token_messaging"),
#Field("token_voice"),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_channel",
)
set_method("msg", "tropo_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "tropo_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "tropo_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Tropo Scratch pad for outbound messaging
#
tablename = "msg_tropo_scratch"
define_table(tablename,
Field("row_id", "integer"),
Field("message_id", "integer"),
Field("recipient"),
Field("message"),
Field("network"),
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwilioModel(S3ChannelModel):
"""
Twilio Inbound SMS channel
"""
names = ("msg_twilio_channel",
"msg_twilio_sid",
)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twilio Channels
#
tablename = "msg_twilio_channel"
define_table(tablename,
# Instance
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("account_name", length=255, unique=True),
Field("url",
default = \
"https://api.twilio.com/2010-04-01/Accounts"
),
Field("account_sid", length=64,
requires = IS_NOT_EMPTY(),
),
Field("auth_token", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "twilio_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "twilio_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "twilio_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twilio Message extensions
# - store message sid to know which ones we've already downloaded
#
tablename = "msg_twilio_sid"
define_table(tablename,
# Component not Instance
self.msg_message_id(ondelete = "CASCADE"),
Field("sid"),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwitterModel(S3Model):
names = ("msg_twitter_channel",
"msg_twitter",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Channel
#
tablename = "msg_twitter_channel"
define_table(tablename,
#Instance
self.super_link("channel_id", "msg_channel"),
# @ToDo: Allow different Twitter accounts for different PEs (Orgs / Teams)
#self.pr_pe_id(),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("twitter_account"),
Field("consumer_key", "password"),
Field("consumer_secret", "password"),
Field("access_token", "password"),
Field("access_token_secret", "password"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_channel_onaccept,
#onvalidation = self.twitter_channel_onvalidation
super_entity = "msg_channel",
)
set_method("msg", "twitter_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "twitter_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "twitter_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twitter Messages: InBox & Outbox
#
tablename = "msg_twitter"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now",
label = T("Posted on"),
),
Field("body", length=140,
label = T("Message"),
),
Field("from_address", #notnull=True,
label = T("From"),
represent = self.twitter_represent,
requires = IS_NOT_EMPTY(),
),
Field("to_address",
label = T("To"),
represent = self.twitter_represent,
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
),
Field("msg_id", # Twitter Message ID
readable = False,
writable = False,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["id",
#"priority",
#"category",
"body",
"from_address",
"date",
#"location_id",
],
#orderby = ~table.priority,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# -------------------------------------------------------------------------
@staticmethod
def twitter_represent(nickname, show_link=True):
"""
Represent a Twitter account
"""
if not nickname:
return current.messages["NONE"]
db = current.db
s3db = current.s3db
table = s3db.pr_contact
query = (table.contact_method == "TWITTER") & \
(table.value == nickname)
row = db(query).select(table.pe_id,
limitby=(0, 1)).first()
if row:
repr = s3db.pr_pentity_represent(row.pe_id)
if show_link:
# Assume person
ptable = s3db.pr_person
row = db(ptable.pe_id == row.pe_id).select(ptable.id,
limitby=(0, 1)).first()
if row:
link = URL(c="pr", f="person", args=[row.id])
return A(repr, _href=link)
return repr
else:
return nickname
# -------------------------------------------------------------------------
@staticmethod
def twitter_channel_onvalidation(form):
"""
Complete oauth: take tokens from session + pin from form,
and do the 2nd API call to Twitter
"""
T = current.T
session = current.session
settings = current.deployment_settings.msg
s3 = session.s3
vars = form.vars
if vars.pin and s3.twitter_request_key and s3.twitter_request_secret:
try:
import tweepy
except:
raise HTTP(501, body=T("Can't import tweepy"))
oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key,
settings.twitter_oauth_consumer_secret)
oauth.set_request_token(s3.twitter_request_key,
s3.twitter_request_secret)
try:
oauth.get_access_token(vars.pin)
vars.oauth_key = oauth.access_token.key
vars.oauth_secret = oauth.access_token.secret
twitter = tweepy.API(oauth)
vars.twitter_account = twitter.me().screen_name
vars.pin = "" # we won't need it anymore
return
except tweepy.TweepError:
session.error = T("Settings were reset because authenticating with Twitter failed")
# Either user asked to reset, or error - clear everything
for k in ["oauth_key", "oauth_secret", "twitter_account"]:
vars[k] = None
for k in ["twitter_request_key", "twitter_request_secret"]:
s3[k] = ""
# =============================================================================
class S3TwitterSearchModel(S3ChannelModel):
"""
Twitter Searches
- results can be fed to KeyGraph
https://dev.twitter.com/docs/api/1.1/get/search/tweets
"""
names = ("msg_twitter_search",
"msg_twitter_result",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Search Query
#
tablename = "msg_twitter_search"
define_table(tablename,
Field("keywords", "text",
label = T("Keywords"),
),
# @ToDo: Allow setting a Point & Radius for filtering by geocode
#self.gis_location_id(),
Field("lang",
# Set in controller
#default = current.response.s3.language,
label = T("Language"),
),
Field("count", "integer",
default = 100,
label = T("# Results per query"),
),
Field("include_entities", "boolean",
default = False,
label = T("Include Entity Information?"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Entity Information"),
T("This is required if analyzing with KeyGraph."))),
),
# @ToDo: Rename or even move to Component Table
Field("is_processed", "boolean",
default = False,
label = T("Processed with KeyGraph?"),
represent = s3_yes_no_represent,
),
Field("is_searched", "boolean",
default = False,
label = T("Searched?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["keywords",
"lang",
"count",
#"include_entities",
],
)
# Reusable Query ID
represent = S3Represent(lookup=tablename, fields=["keywords"])
search_id = S3ReusableField("search_id", "reference %s" % tablename,
label = T("Search Query"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_twitter_search.id")
),
)
set_method("msg", "twitter_search",
method = "poll",
action = self.twitter_search_poll)
set_method("msg", "twitter_search",
method = "keygraph",
action = self.twitter_keygraph)
set_method("msg", "twitter_result",
method = "timeline",
action = self.twitter_timeline)
# ---------------------------------------------------------------------
# Twitter Search Results
#
# @ToDo: Store the places mentioned in the Tweet as linked Locations
#
tablename = "msg_twitter_result"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
# Just present for Super Entity
#self.msg_channel_id(),
search_id(),
s3_datetime(default="now",
label = T("Tweeted on"),
),
Field("tweet_id",
label = T("Tweet ID")),
Field("lang",
label = T("Language")),
Field("from_address",
label = T("Tweeted by")),
Field("body",
label = T("Tweet")),
# @ToDo: Populate from Parser
#Field("category",
# writable = False,
# label = T("Category"),
# ),
#Field("priority", "integer",
# writable = False,
# label = T("Priority"),
# ),
self.gis_location_id(),
# Just present for Super Entity
#Field("inbound", "boolean",
# default = True,
# readable = False,
# writable = False,
# ),
*s3_meta_fields())
configure(tablename,
list_fields = [#"category",
#"priority",
"body",
"from_address",
"date",
"location_id",
],
#orderby=~table.priority,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# -----------------------------------------------------------------------------
@staticmethod
def twitter_search_poll(r, **attr):
"""
Perform a Search of Twitter
S3Method for interactive requests
"""
id = r.id
tablename = r.tablename
current.s3task.async("msg_twitter_search", args=[id])
current.session.confirmation = \
current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them")
# Filter results to this Search
redirect(URL(f="twitter_result",
vars={"~.search_id": id}))
# -----------------------------------------------------------------------------
@staticmethod
def twitter_keygraph(r, **attr):
"""
            Process Search Results with KeyGraph
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_process_keygraph", args=[r.id])
current.session.confirmation = \
current.T("The search results are now being processed with KeyGraph")
# @ToDo: Link to KeyGraph results
redirect(URL(f="twitter_result"))
    # -------------------------------------------------------------------------
@staticmethod
def twitter_timeline(r, **attr):
"""
Display the Tweets on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
"""
if r.representation == "html" and r.name == "twitter_result":
response = current.response
s3 = response.s3
appname = r.application
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)
# Add our control script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
rows = r.resource.select(["date", "body"], limit=2000, as_rows=True)
data = {"dateTimeFormat": "iso8601",
}
now = r.utcnow
tl_start = tl_end = now
events = []
import re
for row in rows:
# Dates
start = row.date or ""
if start:
if start < tl_start:
tl_start = start
if start > tl_end:
tl_end = start
start = start.isoformat()
title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body))
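                # The substitution above strips @mentions and "RT" markers from the
                # tweet body so the timeline title only keeps readable text.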
if len(title) > 30:
title = title[:30]
events.append({"start": start,
"title": title,
"description": row.body,
})
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _class="s3-timeline")
output = dict(item=item)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = current.T("Twitter Timeline")
response.view = "timeline.html"
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class S3XFormsModel(S3Model):
"""
XForms are used by the ODK Collect mobile client
http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android
"""
names = ("msg_xforms_store",)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# SMS store for persistence and scratch pad for combining incoming xform chunks
tablename = "msg_xforms_store"
self.define_table(tablename,
Field("sender", length=20),
Field("fileno", "integer"),
Field("totalno", "integer"),
Field("partno", "integer"),
Field("message", length=160)
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3BaseStationModel(S3Model):
"""
Base Stations (Cell Towers) are a type of Site
@ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain
- see RadioMobile
"""
names = ("msg_basestation",)
def model(self):
T = current.T
define_table = self.define_table
# ---------------------------------------------------------------------
# Base Stations (Cell Towers)
#
tablename = "msg_basestation"
define_table(tablename,
self.super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
                           # Deployments that don't want site codes can hide them
#readable = False,
#writable = False,
# @ToDo: Deployment Setting to add validator to make these unique
),
self.org_organisation_id(
label = T("Operator"),
#widget=S3OrganisationAutocompleteWidget(default_from_profile=True),
requires = self.org_organisation_requires(required=True,
updateable=True),
),
self.gis_location_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_BASE = T("Create Base Station")
current.response.s3.crud_strings[tablename] = Storage(
label_create=T("Create Base Station"),
title_display=T("Base Station Details"),
title_list=T("Base Stations"),
title_update=T("Edit Base Station"),
title_upload=T("Import Base Stations"),
title_map=T("Map of Base Stations"),
label_list_button=T("List Base Stations"),
label_delete_button=T("Delete Base Station"),
msg_record_created=T("Base Station added"),
msg_record_modified=T("Base Station updated"),
msg_record_deleted=T("Base Station deleted"),
msg_list_empty=T("No Base Stations currently registered"))
self.configure(tablename,
deduplicate = self.msg_basestation_duplicate,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_basestation_duplicate(item):
"""
Import item deduplication, match by name
(Adding location_id doesn't seem to be a good idea)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_basestation":
table = item.table
name = "name" in item.data and item.data.name
query = (table.name.lower() == name.lower())
#location_id = None
# if "location_id" in item.data:
# location_id = item.data.location_id
## This doesn't find deleted records:
# query = query & (table.location_id == location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
# if duplicate is None and location_id:
## Search for deleted basestations with this name
# query = (table.name.lower() == name.lower()) & \
# (table.deleted == True)
# row = db(query).select(table.id, table.deleted_fk,
# limitby=(0, 1)).first()
# if row:
# fkeys = json.loads(row.deleted_fk)
# if "location_id" in fkeys and \
# str(fkeys["location_id"]) == str(location_id):
# duplicate = row
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# END =========================================================================
| mit | 2,607,987,622,534,597,000 | 37.903325 | 141 | 0.390586 | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_combined03.py | 1 | 1649 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_combined03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:dispBlanksAs']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({'type': 'column'})
chart2 = workbook.add_chart({'type': 'line'})
data = [
[2, 7, 3, 6, 2],
[20, 25, 10, 10, 20],
[4, 2, 5, 2, 1],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart1.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart1.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart2.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart1.combine(chart2)
worksheet.insert_chart('E9', chart1)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 8,504,918,320,747,612,000 | 26.032787 | 79 | 0.554882 | false |
dnguyen0304/clare | clare/clare/common/messaging/consumer/interfaces.py | 1 | 1596 | # -*- coding: utf-8 -*-
import abc
class IConsumer(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def consume(self, interval, timeout):
"""
Parameters
----------
interval : float
Rate of work. The units are in seconds.
timeout : float
Maximum duration to try fetching a new record. The units
are in seconds.
Returns
-------
None
"""
pass
@abc.abstractmethod
def _consume_once(self, timeout):
"""
Parameters
----------
timeout : float
Maximum duration to try fetching a new record. The units
are in seconds.
Returns
-------
None
Raises
------
clare.common.messaging.consumer.exceptions.FetchTimeout
If the fetcher times out before fetching the minimum fetch size.
"""
pass
class IFetcher(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def pop(self, timeout):
"""
Parameters
----------
timeout : float
Returns
-------
clare.common.messaging.records.Record
Raises
------
clare.common.messaging.consumer.exceptions.FetchTimeout
"""
pass
class IHandler(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def handle(self, record):
"""
Parameters
----------
record : clare.common.messaging.records.Record
"""
pass
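# A minimal sketch (hypothetical, not part of this package) of how the three
# interfaces fit together:
#
#     class Consumer(IConsumer):
#         def __init__(self, fetcher, handler):   # an IFetcher and an IHandler
#             self._fetcher = fetcher
#             self._handler = handler
#         def _consume_once(self, timeout):
#             record = self._fetcher.pop(timeout=timeout)  # may raise FetchTimeout
#             self._handler.handle(record)
#         def consume(self, interval, timeout):
#             while True:
#                 self._consume_once(timeout)
#                 time.sleep(interval)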
| mit | -8,734,426,544,833,338,000 | 16.932584 | 76 | 0.506892 | false |
fredmorcos/attic | projects/plantmaker/plantmaker-main/src/benchmark/benchmark.py | 1 | 2905 | from os import path
from extra.printer import pprint, GREEN, BLUE, RED
class Benchmark(object):
useCairoPlot = False
useGnuPlot = True
def __init__(self, plant, orderList, testNumber):
self.prefix = "generic"
self.testName = "generic"
self.testNumber = testNumber
self.cairoPlotTimes = []
self.gnuPlotTimes = []
self.plant = plant
self.orderList = orderList
self.orderListSize = -1
self.machineListSize = -1
self.startValue = 0
def addGnuPlotTime(self, x, y):
self.gnuPlotTimes.append((x, y))
pprint("PERF Time = " + str(y), GREEN)
def addCairoPlotTime(self, t):
self.cairoPlotTimes.append(t)
pprint("PERF Time = " + str(t), GREEN)
def prepare(self):
pprint("PERF Starting " + self.prefix + " benchmark test " +
str(self.testNumber) + " on " + self.testName, BLUE)
if self.orderListSize != -1:
self.orderList.orders = self.orderList.orders[:self.orderListSize]
if self.machineListSize != -1:
self.plant.machines = self.plant.machines[:self.machineListSize]
self.times = [i * 0 for i in range(self.startValue)]
def save(self):
if Benchmark.useCairoPlot == True:
self.plotCairoPlot()
if Benchmark.useGnuPlot == True:
self.plotGnuPlot()
def plotGnuPlot(self):
import os, subprocess
p = subprocess.Popen(['/usr/bin/gnuplot'], stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = os.getcwd())
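        # gnuplot is driven through stdin: the data points are written to a
        # temporary file ("plantmaker-tmp") and the plot commands are piped in
        # via communicate() below.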
output = ""
hasFloat = False
for i in self.gnuPlotTimes:
if type(i[0]) == float:
hasFloat = True
output += str(i[0]) + " " + str(i[1]) + "\n"
with open("plantmaker-tmp", "w") as f:
f.write(output)
f.close()
of = "benchmarks/" + self.prefix + "-" + self.testName + "-" + \
str(self.testNumber) + "-gp.eps"
commString = "set grid; set term postscript; set out '" + of + "'; " + \
"set format y \"%.4f\"; " + "set xlabel \"" + self.testName + "\"; " + \
"set ylabel \"Time (Seconds)\"; unset key; "
if hasFloat == True:
commString += "set format x \"%.1f\"; "
commString += "plot 'plantmaker-tmp' with lines lw 3, 'plantmaker-tmp' with points pt 7 ps 1\n"
p.communicate(commString)
p.wait()
os.remove("plantmaker-tmp")
def plotCairoPlot(self):
try:
from thirdparty.CairoPlot import dot_line_plot
except:
pprint("PERF Will not output to graph. Install CairoPlot.", RED)
return
dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName +
"-" + str(self.testNumber)) + ".png",
self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True,
None, None, None, None)
dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName +
"-" + str(self.testNumber)) + ".ps",
self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True,
None, None, None, None)
def run(self):
self.prepare()
self.bench()
self.save()
def bench(self):
pass
| isc | -2,449,704,126,314,705,400 | 28.343434 | 97 | 0.63821 | false |
cdubz/babybuddy | reports/graphs/feeding_amounts.py | 1 | 1422 | # -*- coding: utf-8 -*-
from django.utils import timezone
from django.utils.translation import gettext as _
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def feeding_amounts(instances):
"""
Create a graph showing daily feeding amounts over time.
:param instances: a QuerySet of Feeding instances.
    :returns: a tuple of the graph's html and javascript.
"""
totals = {}
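    # Maps each local feeding end date to the total amount fed on that day.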
for instance in instances:
end = timezone.localtime(instance.end)
date = end.date()
if date not in totals.keys():
totals[date] = 0
totals[date] += instance.amount or 0
amounts = [round(amount, 2) for amount in totals.values()]
trace = go.Bar(
name=_('Total feeding amount'),
x=list(totals.keys()),
y=amounts,
hoverinfo='text',
textposition='outside',
text=amounts
)
layout_args = utils.default_graph_layout_options()
layout_args['title'] = _('<b>Total Feeding Amounts</b>')
layout_args['xaxis']['title'] = _('Date')
layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
layout_args['yaxis']['title'] = _('Feeding amount')
fig = go.Figure({
'data': [trace],
'layout': go.Layout(**layout_args)
})
output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
return utils.split_graph_output(output)
| bsd-2-clause | -5,724,312,530,632,887,000 | 29.913043 | 72 | 0.627286 | false |
geraldinepascal/FROGS | tools/phyloseq_beta_diversity/phyloseq_beta_diversity.py | 1 | 7336 | #!/usr/bin/env python3
#
# Copyright (C) 2018 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Ta Thi Ngan & Maria Bernard INRA - SIGENAE'
__copyright__ = 'Copyright (C) 2017 INRA'
__license__ = 'GNU General Public License'
__version__ = '3.2.3'
__email__ = 'frogs-support@inrae.fr'
__status__ = 'prod'
import os
import sys
import argparse
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FROGS_DIR=""
if CURRENT_DIR.endswith("phyloseq_beta_diversity"):
FROGS_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
else:
FROGS_DIR = os.path.dirname(CURRENT_DIR)
# PATH
BIN_DIR = os.path.abspath(os.path.join(FROGS_DIR, "libexec"))
os.environ['PATH'] = BIN_DIR + os.pathsep + os.environ['PATH']
APP_DIR = os.path.abspath(os.path.join(FROGS_DIR, "app"))
os.environ['PATH'] = APP_DIR + os.pathsep + os.environ['PATH']
# PYTHONPATH
LIB_DIR = os.path.abspath(os.path.join(FROGS_DIR, "lib"))
sys.path.append(LIB_DIR)
if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR
else: os.environ['PYTHONPATH'] = LIB_DIR + os.pathsep + os.environ['PYTHONPATH']
# LIBR
LIBR_DIR = os.path.join(LIB_DIR,"external-lib")
from frogsUtils import *
##################################################################################################################################################
#
# COMMAND LINES
#
##################################################################################################################################################
class Rscript(Cmd):
"""
@summary: Launch Rmarkdown script to present the data beta diversity with phyloseq.
@see: http://rmarkdown.rstudio.com/
https://joey711.github.io/phyloseq/
@return: html file containing the plots
beta divesity distance matrix tsv file(s)
"""
def __init__(self, html, phyloseq, varExp, methods, outdir, rmd_stderr):
"""
@param html: [str] path to store resulting html file.
@param phyloseq: [str] path to phyloseq object in RData file, the result of FROGS Phyloseq Import Data.
@param varExp: [str] Experiment variable to split plot.
        @param methods: [str] one or more beta diversity methods (comma separated).
@param outdir: [str] The path to store resulting beta diversity distance matrix.
@param rmd_stderr: [str] Path to temporary Rmarkdown stderr output file
"""
rmd = os.path.join(CURRENT_DIR, "phyloseq_beta_diversity.Rmd")
Cmd.__init__( self,
'Rscript',
'Run 1 code Rmarkdown',
'-e "rmarkdown::render(' + "'" + rmd + "',knit_root_dir='" + outdir + "',output_file='" + html + \
"', params=list(phyloseq='" + phyloseq + "', varExp='" + varExp + "', methods='" + methods + "', libdir ='" + LIBR_DIR + "'), intermediates_dir='" + os.path.dirname(html) + "')" + '" 2> ' + rmd_stderr,
"-e '(sessionInfo()[[1]][13])[[1]][1]; paste(\"Rmarkdown version: \",packageVersion(\"rmarkdown\")) ; library(phyloseq); paste(\"Phyloseq version: \",packageVersion(\"phyloseq\"))'")
def get_version(self):
"""
@summary: Returns the program version number.
        @return: [str] Version number if this is possible, otherwise this method returns 'unknown'.
"""
return Cmd.get_version(self, 'stdout')
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
# Manage parameters
parser = argparse.ArgumentParser( description='To present the data beta diversity with phyloseq.')
parser.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." )
parser.add_argument( '--version', action='version', version=__version__ )
parser.add_argument('-v', '--varExp', type=str, required=True, default=None, help='The experiment variable you want to analyse.')
parser.add_argument('-m', '--distance-methods', required=True, type=str, default='bray,cc,unifrac,wunifrac', help='Comma separated values beta diversity methods available in Phyloseq (see https://www.bioconductor.org/packages/devel/bioc/manuals/phyloseq/man/phyloseq.pdf). [Default: %(default)s].')
# Inputs
group_input = parser.add_argument_group( 'Inputs' )
group_input.add_argument('-r','--rdata', required=True, default=None, help="The path of RData file containing a phyloseq object-the result of FROGS Phyloseq Import Data" )
# output
group_output = parser.add_argument_group( 'Outputs' )
group_output.add_argument('--matrix-outdir', required=True, action="store", type=str, help="Path to output matrix file")
group_output.add_argument('-o','--html', default='phyloseq_beta_diversity.nb.html', help="The HTML file containing the graphs. [Default: %(default)s]" )
group_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several informations on executed commands.')
args = parser.parse_args()
prevent_shell_injections(args)
Logger.static_write(args.log_file, "## Application\nSoftware :" + sys.argv[0] + " (version : " + str(__version__) + ")\nCommand : " + " ".join(sys.argv) + "\n\n")
# check parameter
list_distance=["unifrac","wunifrac","bray","cc","dpcoa","jsd","manhattan","euclidean","canberra","kulczynski","jaccard","gower","altGower","morisita","horn","mountford","raup","binomial","chao","cao","wt","-1","c","wb","rt","I","e","t","me","j","sor","m","-2","co","g","-3","l","19","hk","rlb","sim","gl","z","maximum","binary","minkowski","ANY"]
methods = args.distance_methods.strip() if not args.distance_methods.strip()[-1]=="," else args.distance_methods.strip()[:-1]
for method in methods.split(","):
if method not in list_distance:
raise_exception( Exception( '\n\n#ERROR : Your method "'+str(method)+'", name is not correct !!! Please make sure that it is in the list:'+str(list_distance)+"\n\n"))
# Process
outdir = os.path.abspath(args.matrix_outdir)
if not os.path.exists(outdir):
os.makedirs(outdir)
phyloseq=os.path.abspath(args.rdata)
html=os.path.abspath(args.html)
try:
tmpFiles = TmpFiles(os.path.dirname(html))
rmd_stderr = tmpFiles.add("rmarkdown.stderr")
Rscript(html, phyloseq, args.varExp, methods, outdir, rmd_stderr).submit( args.log_file )
finally :
if not args.debug:
tmpFiles.deleteAll()
| gpl-3.0 | -1,608,606,170,957,709,000 | 54.157895 | 350 | 0.598555 | false |
Detailscool/YHSpider | JiraStoryMaker/JiraStoryMaker2.py | 1 | 5993 | #!/usr/bin/python
# -*- coding:utf-8 -*-
# JiraStoryMaker.py
# Created by Henry on 2018/4/9
# Description :
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import json
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def create_story(**kwargs):
summary_text = kwargs.get('summary_text', None)
work_time_text = kwargs.get('work_time_text', None)
REQ = kwargs.get('REQ', None)
isFirst = kwargs.get('isFirst', False)
time.sleep(1)
new_button = driver.find_element_by_css_selector('#create_link')
new_button.click()
WebDriverWait(driver, 10000).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'span.drop-menu'))
)
drop_menus = driver.find_elements_by_css_selector('span.drop-menu')
if isFirst:
project = drop_menus[0]
project.click()
data_suggestions = driver.find_element_by_id('project-options').get_attribute('data-suggestions')
items = json.loads(data_suggestions)
# print items
if isinstance(items, list) and items and isinstance(items[0], dict) and isinstance(items[0]['items'], list) and items[0]['items'] and isinstance(items[0]['items'][0], dict) and items[0]['items'][0]['label']:
select_group = items[0]['items'][0]['label']
if u'IOSZHIBO' not in select_group:
groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if 'IOSZHIBO' in a.text]
# print '\ngroups:', groups
if groups:
groups[0].click()
print 'click'
time.sleep(0.5)
else:
project.click()
story_type = driver.find_element_by_id('issuetype-single-select')
story_type.click()
story_type_groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if u'故事'==a.text]
if story_type_groups:
story_type_groups[0].click()
time.sleep(0.5)
drop_menus = driver.find_elements_by_css_selector('span.drop-menu')
if len(drop_menus) < 5:
time.sleep(10)
print '出错啦'
sys.exit(1)
test_type = Select(driver.find_element_by_id('customfield_10200'))
test_type.select_by_value('10202')
time.sleep(0.5)
requirement = Select(driver.find_element_by_id('customfield_10101'))
requirement.select_by_value('10101')
time.sleep(0.5)
summary = driver.find_element_by_id('summary')
summary.send_keys(unicode(summary_text))
time.sleep(0.5)
work_time = driver.find_element_by_id('customfield_10833')
work_time.send_keys(work_time_text)
time.sleep(0.5)
sprint = drop_menus[5]
sprint.click()
sprint_groups = []
while not sprint_groups:
time.sleep(0.5)
sprint_groups = [a for a in driver.find_elements_by_css_selector('li a') if group in a.text and u'在用' in a.text]
sprint_groups[0].click()
time.sleep(0.5)
# time.sleep(15)
# code = driver.find_element_by_id('customfield_10503-3')
# code.click()
if REQ:
question = driver.find_element_by_css_selector('#issuelinks-issues-multi-select textarea')
question.send_keys(unicode(REQ))
time.sleep(0.5)
items = driver.find_elements_by_css_selector('li.menu-item')
if items and len(items) > 1:
relationship_item = items[1]
relationship_item.click()
time.sleep(0.5)
dev_person = driver.find_element_by_css_selector('#customfield_10300_container textarea')
if dev_person and login_token.split('-'):
dev_person.send_keys(login_token.split('-')[0])
time.sleep(0.5)
tester_person = driver.find_element_by_css_selector('#customfield_10400_container textarea')
if tester_person and tester:
tester_person.send_keys(tester)
time.sleep(0.5)
submit = driver.find_element_by_id('create-issue-submit')
submit.click()
WebDriverWait(driver, 10000).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="aui-flag-container"]/div/div/a'))
)
story = driver.find_element_by_xpath('//*[@id="aui-flag-container"]/div/div/a')
story_href = story.get_attribute('href')
print summary_text, ': ', story_href
# print '已建: ', summary_text, ', 时长, :', work_time_text, '天'
driver.refresh()
if __name__ == '__main__':
login_token = sys.argv[1]
file_path = sys.argv[2]
tester = sys.argv[3]
if not os.path.exists(file_path):
print '出错啦'
sys.exit(1)
else:
with open(file_path, 'r') as f:
lines = f.readlines()
f.close()
if '-' not in login_token:
print '出错啦'
sys.exit(1)
elif len(login_token.split('-')[-1]) != 32:
print '出错啦'
sys.exit(1)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=chrome_options)
url = '' + login_token
print url
driver.get(url)
# print driver.get_cookies()
group = u'iOS直播服务组'
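    # u'iOS直播服务组' ("iOS live streaming service group") is the substring
    # create_story() matches against sprint names.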
for idx, line in enumerate(lines):
if ',' in line and ',' not in line:
words = line.encode('utf-8').strip().split(',')
elif ',' in line and ',' not in line:
words = line.encode('utf-8').strip().split(',')
else:
words = []
if len(words) == 2:
create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), isFirst=(idx==0))
elif len(words) == 3:
create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), REQ=words[2].strip(), isFirst=(idx==0))
driver.close() | mit | -496,559,804,413,257,400 | 31.78453 | 215 | 0.607281 | false |
sdgathman/pymilter | testsample.py | 1 | 5060 | import unittest
import Milter
import sample
import template
import mime
import zipfile
from Milter.test import TestBase
from Milter.testctx import TestCtx
class TestMilter(TestBase,sample.sampleMilter):
def __init__(self):
TestBase.__init__(self)
sample.sampleMilter.__init__(self)
class BMSMilterTestCase(unittest.TestCase):
def setUp(self):
self.zf = zipfile.ZipFile('test/virus.zip','r')
self.zf.setpassword(b'denatured')
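    # The virus-bearing test messages ship in a password-protected zip,
    # presumably so they do not trip antivirus scanners in the source tree.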
def tearDown(self):
self.zf.close()
self.zf = None
def testTemplate(self,fname='test2'):
ctx = TestCtx()
Milter.factory = template.myMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
count = 10
while count > 0:
rc = ctx._connect(helo='milter-template.example.org')
self.assertEquals(rc,Milter.CONTINUE)
with open('test/'+fname,'rb') as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
self.assertFalse(ctx._bodyreplaced,"Message body replaced")
ctx._close()
count -= 1
def testHeader(self,fname='utf8'):
ctx = TestCtx()
Milter.factory = sample.sampleMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
rc = ctx._connect()
self.assertEquals(rc,Milter.CONTINUE)
with open('test/'+fname,'rb') as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
self.assertFalse(ctx._bodyreplaced,"Message body replaced")
fp = ctx._body
with open('test/'+fname+".tstout","wb") as ofp:
ofp.write(fp.getvalue())
ctx._close()
def testCtx(self,fname='virus1'):
ctx = TestCtx()
Milter.factory = sample.sampleMilter
ctx._setsymval('{auth_authen}','batman')
ctx._setsymval('{auth_type}','batcomputer')
ctx._setsymval('j','mailhost')
rc = ctx._connect()
self.assertTrue(rc == Milter.CONTINUE)
with self.zf.open(fname) as fp:
rc = ctx._feedFile(fp)
milter = ctx.getpriv()
# self.assertTrue(milter.user == 'batman',"getsymval failed: "+
# "%s != %s"%(milter.user,'batman'))
self.assertEquals(milter.user,'batman')
self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed")
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(ctx._bodyreplaced,"Message body not replaced")
fp = ctx._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
fp.seek(0)
msg = mime.message_from_file(fp)
s = msg.get_payload(1).get_payload()
milter.log(s)
ctx._close()
def testDefang(self,fname='virus1'):
milter = TestMilter()
milter.setsymval('{auth_authen}','batman')
milter.setsymval('{auth_type}','batcomputer')
milter.setsymval('j','mailhost')
rc = milter.connect()
self.assertTrue(rc == Milter.CONTINUE)
with self.zf.open(fname) as fp:
rc = milter.feedFile(fp)
self.assertTrue(milter.user == 'batman',"getsymval failed")
# setsymlist not working in TestBase
#self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed")
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
fp = milter._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read())
fp.seek(0)
msg = mime.message_from_file(fp)
s = msg.get_payload(1).get_payload()
milter.log(s)
milter.close()
def testParse(self,fname='spam7'):
milter = TestMilter()
milter.connect('somehost')
rc = milter.feedMsg(fname)
self.assertTrue(rc == Milter.ACCEPT)
self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.")
fp = milter._body
with open('test/'+fname+".tstout","wb") as f:
f.write(fp.getvalue())
milter.close()
def testDefang2(self):
milter = TestMilter()
milter.connect('somehost')
rc = milter.feedMsg('samp1')
self.assertTrue(rc == Milter.ACCEPT)
self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.")
with self.zf.open("virus3") as fp:
rc = milter.feedFile(fp)
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
fp = milter._body
with open("test/virus3.tstout","wb") as f:
f.write(fp.getvalue())
#self.assertTrue(fp.getvalue() == open("test/virus3.out","r").read())
with self.zf.open("virus6") as fp:
rc = milter.feedFile(fp)
self.assertTrue(rc == Milter.ACCEPT)
self.assertTrue(milter._bodyreplaced,"Message body not replaced")
self.assertTrue(milter._headerschanged,"Message headers not adjusted")
fp = milter._body
with open("test/virus6.tstout","wb") as f:
f.write(fp.getvalue())
milter.close()
def suite(): return unittest.makeSuite(BMSMilterTestCase,'test')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 4,102,225,718,785,301,500 | 33.189189 | 77 | 0.651383 | false |
StuartGordonReid/Comp-Finance | Optimizers/Solution.py | 1 | 1512 | __author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
Wraps a candidate solution to an optimization problem (see the Solution class below).
"""
class Solution(object):
solution = []
def __init__(self, solution, problem):
"""
Abstract initialization method for a solution to some optimization function
:param solution: a numpy array (much faster than lists)
"""
self.solution = solution
self.problem = problem
return
def __len__(self):
"""
Overload of the len operator for the Solution class
:rtype : Sized?
"""
return len(self.solution)
def update(self, solution):
"""
This method is used for updating a solution
"""
self.solution = solution
def get(self):
"""
This method is used to retrieve the numpy array for direct manipulation
"""
return self.solution
def evaluate(self):
return self.problem.evaluate(self.solution)
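    # "Greater" means "fitter": for "min" problems a lower objective value wins,
    # for "max" problems a higher one (see __gt__ below).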
def __gt__(self, other):
assert isinstance(other, Solution)
if self.problem.optimization is "min":
return self.evaluate() < other.evaluate()
elif self.problem.optimization is "max":
return self.evaluate() > other.evaluate()
def deep_copy(self):
copy = Solution(None, self.problem)
copy.solution = []
for i in range(len(self.solution)):
copy.solution.append(self.solution[i])
return copy
| lgpl-3.0 | 2,597,317,269,718,818,000 | 25.526316 | 83 | 0.587963 | false |
ModoUnreal/PyWeather | setup.py | 1 | 99122 | '''
_______
| \ \ / @@@;
| \ \ / `#....@
| | \ / ,;@.....;,;
| | \ / @..@........@` PyWeather Setup
| | \ / .............@ version 0.6.3 beta
| / \ / .............@ (c) 2017-2018 - o355
|_______/ | @...........#`
| | .+@@++++@#;
| | @ ; ,
| | : ' .
| | @ # .`
| | @ # .`
'''
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
if sys.version_info < (3, 0, 0):
print("You'll need Python 3 to run PyWeather.",
"Press enter to exit.")
input()
sys.exit()
elif (sys.version_info > (3, 0, 0)
and sys.version_info < (3, 5, 0)):
print("You have a Python version between 3.0 and 3.4.",
"While PyWeather will work, you may experience a few quirks.",
"Try updating to Python 3.6, as it works more reliably.",
"Please take note of this in PyWeather.","", sep="\n")
elif sys.version_info >= (3, 7, 0):
print("You have a Python version of 3.7 and greater.",
"Please note that PyWeather 0.6.2 beta is NOT certified to work with",
"Python 3.7. Python 3.6 and below should work just fine.", sep="\n")
import configparser
import traceback
import subprocess
import logging
import os
import urllib
# Now force the writing of the versioninfo file during setup, this should prevent issues
# in the event I forget to gitignore the file.
try:
open('updater//versioninfo.txt', 'w').close()
with open("updater//versioninfo.txt", 'a') as out:
out.write("0.6.3 beta")
out.close()
except:
print("Couldn't write the versioninfo file. This may cause issues with PyWeather down the road.")
config = configparser.ConfigParser()
config.read('storage//config.ini')
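# Provision storage//config.ini with every section and default value that
# PyWeather expects; called below whenever the config has not been provisioned yet.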
def configprovision():
try:
config.add_section("GEOCODER API")
except configparser.DuplicateSectionError:
print("Failed to add the Geocoder API section.")
try:
config.add_section("FAVORITE LOCATIONS")
except configparser.DuplicateSectionError:
print("Failed to add the favorite locations section.")
try:
config.add_section("PREVIOUS LOCATIONS")
except configparser.DuplicateSectionError:
print("Failed to add the previous locations section")
try:
config.add_section("HURRICANE")
except configparser.DuplicateSectionError:
print("Failed to add the hurricane section.")
try:
config.add_section("FIRSTINPUT")
except configparser.DuplicateSectionError:
print("Failed to add the firstinput section.")
try:
config.add_section('SUMMARY')
except configparser.DuplicateSectionError:
print("Failed to add the summary section.")
try:
config.add_section('VERBOSITY')
except configparser.DuplicateSectionError:
print("Failed to add the verbosity section.")
try:
config.add_section('TRACEBACK')
except configparser.DuplicateSectionError:
print("Failed to add the traceback section.")
try:
config.add_section('UI')
except configparser.DuplicateSectionError:
print("Failed to add the UI section.")
try:
config.add_section('PREFETCH')
except configparser.DuplicateSectionError:
print("Failed to add the prefetch section.")
try:
config.add_section('UPDATER')
except configparser.DuplicateSectionError:
print("Failed to add the updater section.")
try:
config.add_section('KEYBACKUP')
except configparser.DuplicateSectionError:
print("Failed to add the keybackup section.")
try:
config.add_section('PYWEATHER BOOT')
except configparser.DuplicateSectionError:
print("Failed to add the PyWeather Boot section.")
try:
config.add_section('USER')
except configparser.DuplicateSectionError:
print("Failed to add the user section.")
try:
config.add_section('CACHE')
except configparser.DuplicateSectionError:
print("Failed to add the cache section.")
try:
config.add_section('RADAR GUI')
except configparser.DuplicateSectionError:
print("Failed to add the Radar GUI section.")
try:
config.add_section('GEOCODER')
except configparser.DuplicateSectionError:
print("Failed to add the Geocoder section.")
config['SUMMARY']['sundata_summary'] = 'False'
config['SUMMARY']['almanac_summary'] = 'False'
config['SUMMARY']['showalertsonsummary'] = 'True'
config['SUMMARY']['showtideonsummary'] = 'False'
config['SUMMARY']['showyesterdayonsummary'] = 'False'
config['VERBOSITY']['verbosity'] = 'False'
config['VERBOSITY']['json_verbosity'] = 'False'
config['VERBOSITY']['setup_verbosity'] = 'False'
config['VERBOSITY']['setup_jsonverbosity'] = 'False'
config['VERBOSITY']['updater_verbosity'] = 'False'
config['VERBOSITY']['updater_jsonverbosity'] = 'False'
config['VERBOSITY']['keybackup_verbosity'] = 'False'
config['VERBOSITY']['configdefault_verbosity'] = 'False'
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['configdefault_tracebacks'] = 'False'
config['UI']['show_entertocontinue'] = 'True'
config['UI']['detailedinfoloops'] = '6'
config['UI']['forecast_detailedinfoloops'] = '5'
config['UI']['show_completediterations'] = 'False'
config['UI']['alerts_usiterations'] = '1'
config['UI']['alerts_euiterations'] = '2'
config['UI']['extratools_enabled'] = 'False'
config['PREFETCH']['10dayfetch_atboot'] = 'False'
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
config['UPDATER']['autocheckforupdates'] = 'False'
config['UPDATER']['show_updaterreleasetag'] = 'False'
config['KEYBACKUP']['savedirectory'] = 'backup//'
config['PYWEATHER BOOT']['validateapikey'] = 'True'
config['UPDATER']['showReleaseNotes'] = 'True'
config['UPDATER']['showReleaseNotes_uptodate'] = 'False'
config['UPDATER']['showNewVersionReleaseDate'] = 'True'
config['USER']['configprovisioned'] = 'True'
config['CACHE']['enabled'] = 'True'
config['CACHE']['alerts_cachedtime'] = '5'
config['CACHE']['current_cachedtime'] = '10'
config['CACHE']['threedayhourly_cachedtime'] = '60'
config['CACHE']['tendayhourly_cachedtime'] = '60'
config['CACHE']['forecast_cachedtime'] = '60'
config['CACHE']['almanac_cachedtime'] = '240'
config['CACHE']['sundata_cachedtime'] = '480'
config['CACHE']['tide_cachedtime'] = '480'
config['CACHE']['hurricane_cachedtime'] = '180'
config['CACHE']['yesterday_cachedtime'] = '720'
config['RADAR GUI']['radar_imagesize'] = 'normal'
config['RADAR GUI']['bypassconfirmation'] = 'False'
config['GEOCODER']['scheme'] = 'https'
config['GEOCODER API']['customkey_enabled'] = 'False'
config['GEOCODER API']['customkey'] = 'None'
config['PREFETCH']['hurricanedata_atboot'] = 'False'
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
config['HURRICANE']['enablenearestcity'] = 'False'
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
config['HURRICANE']['api_username'] = 'pyweather_proj'
config['HURRICANE']['nearestcitysize'] = 'medium'
config['FAVORITE LOCATIONS']['enabled'] = 'True'
config['FAVORITE LOCATIONS']['favloc1'] = 'None'
config['FAVORITE LOCATIONS']['favloc2'] = 'None'
config['FAVORITE LOCATIONS']['favloc3'] = 'None'
config['FAVORITE LOCATIONS']['favloc4'] = 'None'
config['FAVORITE LOCATIONS']['favloc5'] = 'None'
config['FAVORITE LOCATIONS']['favloc1_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc2_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc3_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc4_data'] = 'None'
config['FAVORITE LOCATIONS']['favloc5_data'] = 'None'
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
config['PREVIOUS LOCATIONS']['prevloc1'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc2'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc3'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc4'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc5'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc1_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc2_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc3_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc4_data'] = 'None'
config['PREVIOUS LOCATIONS']['prevloc5_data'] = 'None'
try:
with open('storage//config.ini', 'w') as configfile:
config.write(configfile)
except:
print("Hmmf...an odd error occurred. A full traceback will be",
"printed below. Please report this issue on GitHub",
"(github.com/o355/pyweather), as that would be greatly appreciated",
"for trying to fix the bug that you just encountered!", sep="\n")
traceback.print_exc()
# Giving users choice, unlike Microsoft.
print("Would you like to continue using PyWeather with an unprovisioned config?",
"It's highly recommended you don't continue, as you may encounter",
"unexpected errors and issues with using PyWeather. Yes or No.", sep="\n")
provisionfailed_continue = input("Input here: ").lower()
if provisionfailed_continue == "yes":
print("Continuing with PyWeather Setup. Please remember, you may encounter",
"unexpected errors and issues. You can always retry provisioning your config",
"by using the configsetup.py script in the storage folder.", sep="\n")
elif provisionfailed_continue == "no":
print("Stopping PyWeather Setup. You can retry to provision your config by using",
"the configsetup.py script in the storage folder.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Couldn't understand your input. By default, PyWeather Setup is stopping.",
"You can retry to provision your config by using the configsetup.py script",
"in the storage folder. Press enter to exit.", sep="\n")
input()
sys.exit()
# See if the config is "provisioned". If it isn't, a KeyError will occur,
# because it's not created. Here, we set up the config to defaults if it's not
# provisioned.
try:
configprovisioned = config.getboolean('USER', 'configprovisioned')
except:
print("Your config likely isn't provisioned. Would you like to provision your config?",
"It's highly recommended you provision your config. If you decide not to,",
"you may run into issues using PyWeather.",
"Yes or No.", sep="\n")
provisionconfig = input("Input here: ").lower()
if provisionconfig == "yes":
print("Provisioning your config.")
configprovision()
print("Config file provisioned successfully! Moving on with PyWeather setup...")
elif provisionconfig == "no":
print("Not provisioning your config. You may encounter unexpected errors",
"and issues when using PyWeather, however.", sep="\n")
else:
print("Couldn't understand your input. By default, I'm going to provision",
"your config. Beginning now...", sep="\n")
configprovision()
print("Config file provisioned successfully! Moving on with PyWeather setup...")
try:
verbosity = config.getboolean('VERBOSITY', 'setup_verbosity')
jsonVerbosity = config.getboolean('VERBOSITY', 'setup_jsonverbosity')
tracebacksEnabled = config.getboolean('TRACEBACK', 'setup_tracebacks')
except:
print("Couldn't load your config file. Make sure there aren't any typos",
"in the config, and that the config file is accessible.",
"Setting config variables to their defaults.",
"Here's the full traceback, in case you need it.", sep="\n")
traceback.print_exc()
verbosity = False
jsonVerbosity = False
tracebacksEnabled = False
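# Traceback helpers: only print (or log) the full traceback when the relevant
# traceback/verbosity options are enabled in the config.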
def printException():
if tracebacksEnabled == True:
print("Here's the full traceback (for error reporting):")
traceback.print_exc()
def printException_loggerwarn():
if verbosity == True:
logger.warning("Oh snap! We ran into a non-critical error. Here's the traceback.")
traceback.print_exc()
logger = logging.getLogger(name='pyweather_setup_0.6.3beta')
logger.setLevel(logging.DEBUG)
logformat = '%(asctime)s | %(levelname)s | %(message)s'
logging.basicConfig(format=logformat)
if verbosity == True:
logger.setLevel(logging.DEBUG)
elif tracebacksEnabled == True:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.CRITICAL)
logger.debug("Listing configuration options:")
logger.debug("verbosity: %s ; jsonVerbosity: %s" %
(verbosity, jsonVerbosity))
logger.debug("tracebacksEnabled: %s" %
tracebacksEnabled)
print("Hi! Welcome to PyWeather 0.6.3 beta! Glad that you're here.",
"I'm here to help set up PyWeather, and let you configure it to your liking.",
"Let's begin!", sep="\n")
import shutil
import time
import json
import codecs
buildnumber = 63
buildversion = "0.6.3 beta"
logger.debug("buildnumber: %s ; buildversion: %s" %
(buildnumber, buildversion))
print("","Before we get started, I want to confirm some permissions from you.",
"Is it okay if I use 1-5 MB of data (downloading libraries), save a small",
"text file called apikey.txt (under 2 KB), and automatically install Python",
"libraries?",
"Please input yes or no below:", sep="\n")
confirmPermissions = input("Input here: ").lower()
logger.debug("confirmPermissions: %s" % confirmPermissions)
if confirmPermissions == "no":
logger.debug("User denied permissions. Closing...")
print("Okay! Closing now.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
elif confirmPermissions != "yes":
logger.debug("Couldn't understand. Closing...")
print("I couldn't understand what you said.",
"As a precaution, I won't proceed any further.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
print("","Cool! Let's start.",
"I'm going to start by checking for necessary libraries (to run PyWeather).",
"This can take a moment, so please hold tight while I check!", sep="\n")
try:
import pip
except ImportError:
logger.warn("pip is NOT installed! Asking user for automated install...")
printException_loggerwarn()
print("","Shucks! PIP couldn't be imported, and I need PIP to install",
"libraries for you. Would you like me to install PIP for you?",
"Yes or No.", sep="\n")
pipConfirm = input("Input here: ").lower()
logger.debug("pipConfirm: %s" % pipConfirm)
if pipConfirm == "no":
logger.info("User denied PIP install, closing...")
print("","Okay! I'm closing setup, as I need PIP to continue.",
"Press enter to continue.", sep="\n")
input()
sys.exit()
elif pipConfirm == "yes":
logger.info("User allowed PIP install. Starting...")
print("","Okay!",
"I'll download PIP's installer, and run it.",
"Doing such uses about 2-4 MB of data, and will quit PW setup.",
"When the setup script finishes, you'll need to run the setup script again."
"I'll start in a few seconds.", sep="\n")
time.sleep(3)
print("Downloading the installer...")
# We use the built-in urllib library, as some Python installs don't include requests.
try:
with urllib.request.urlopen('https://bootstrap.pypa.io/get-pip.py') as update_response, open('get-pip.py',
'wb') as update_out_file:
logger.debug("update_response: %s ; update_out_file: %s"
% (update_response, update_out_file))
shutil.copyfileobj(update_response, update_out_file)
except:
print("Couldn't download the PIP installer, either due to no internet connection, or the library that fetches",
"files has failed. As an alternative, you can download the installer yourself.",
"Please download this file: 'https://bootstrap.pypa.io/get-pip.py', and place it in PyWeather's base directory.",
"Afterwards, press enter to execute the installer. Press Control + C to exit.", sep="\n")
printException()
input()
print("Running the installer...")
logger.debug("Executing get-pip.py. If this script exits, please restart the setup script.")
exec(open("get-pip.py").read())
else:
logger.warn("Couldn't understand the input. Closing...")
print("","I didn't understand what you said.",
"As a precaution, I'm closing setup, as I need PIP to continue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
except PermissionError:
traceback.print_exc()
print("PIP has incorrect permissions on your machine. Please attempt to fix",
"permissions on the folder that is listed in the traceback.",
"Linux users: Use sudo chown -R <yourusername> <folder>, this should fix the issue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
print("Deleting the PIP installer file (if it exists)")
try:
os.remove("get-pip.py")
except:
printException_loggerwarn()
print("The file get-pip.py didn't exist, or we had wrong permissions.")
neededLibraries = 0
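# Probe each required third-party library; every failed import bumps the
# neededLibraries counter so an automatic install can be offered afterwards.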
try:
import colorama
coloramaInstalled = True
logger.info("Colorama is installed.")
logger.debug("coloramaInstalled: %s" % coloramaInstalled)
except ImportError:
coloramaInstalled = False
neededLibraries = neededLibraries + 1
logger.warn("Colorama is not installed.")
printException_loggerwarn()
logger.debug("coloramaInstalled: %s ; neededLibraries: %s"
% (coloramaInstalled, neededLibraries))
try:
import geopy
geopyInstalled = True
logger.info("geopy is installed.")
logger.debug("geopyInstalled: %s" % geopyInstalled)
except ImportError:
geopyInstalled = False
neededLibraries = neededLibraries + 1
logger.info("geopy is NOT installed.")
printException_loggerwarn()
logger.debug("geopyInstalled: %s ; neededLibraries: %s"
% (geopyInstalled, neededLibraries))
try:
from appJar import gui
appjarInstalled = True
logger.info("appjar is installed.")
logger.debug("appjarInstalled: %s" % appjarInstalled)
except ImportError as e:
if e == "No module named '_tkinter', please install the python3-tk package":
print("appJar cannot run on this platform. Skipping installation...")
appjarInstalled = True
logger.debug("appjarInstalled: %s" % appjarInstalled)
else:
appjarInstalled = False
neededLibraries = neededLibraries + 1
logger.debug("appJar is NOT installed.")
printException_loggerwarn()
logger.debug("appjarInstalled: %s ; neededLibraries: %s" %
(appjarInstalled, neededLibraries))
try:
import requests
requestsInstalled = True
logger.debug("requests is installed.")
logger.debug("requestsInstalled: %s" % requestsInstalled)
except:
requestsInstalled = False
neededLibraries = neededLibraries + 1
logger.debug("requests is NOT installed.")
printException_loggerwarn()
logger.debug("requestsInstalled: %s ; neededLibraries: %s" %
(requestsInstalled, neededLibraries))
try:
import halo
haloInstalled = True
logger.debug("halo is installed.")
logger.debug("haloInstalled: %s" % haloInstalled)
except:
haloInstalled = False
neededLibraries += 1
logger.debug("halo is NOT installed.")
printException_loggerwarn()
logger.debug("haloInstalled: %s ; neededLibraries: %s" %
(haloInstalled, neededLibraries))
print("All done!")
if neededLibraries == 0:
logger.debug("All libraries are installed.")
print("All necessary libraries have been installed!")
else:
logger.debug("Libraries need to be installed.")
print("Shucks. Not all necessary libraries are installed. Here's what needs to be installed:")
if coloramaInstalled is False:
print("- Colorama")
if geopyInstalled is False:
print("- Geopy")
if appjarInstalled is False:
print("- appJar")
if requestsInstalled is False:
print("- Requests")
if haloInstalled is False:
print("- Halo")
print("If you want me to, I can automatically install these libraries.",
"Would you like me to do such? Yes or No.", sep="\n")
neededLibrariesConfirm = input("Input here: ").lower()
logger.debug("neededLibrariesConfirm: %s" % neededLibrariesConfirm)
if neededLibrariesConfirm == "no":
logger.warning("Not installing necessary libraries. Now exiting...")
print("Okay. I needed to install necessary libraries to continue.",
"Now quitting...",
"Press enter to exit.", sep="\n")
input()
sys.exit()
elif neededLibrariesConfirm == "yes":
print("Now installing necessary libraries...")
if coloramaInstalled is False:
print("Installing Colorama...")
pip.main(['install', 'colorama'])
if geopyInstalled is False:
print("Installing geopy...")
pip.main(['install', 'geopy'])
if appjarInstalled is False:
print("Installing appJar...")
pip.main(['install', 'appJar'])
if requestsInstalled is False:
print("Installing requests...")
pip.main(['install', 'requests'])
if haloInstalled is False:
print("Installing halo...")
pip.main(['install', 'halo'])
logger.info("Running the double check on libraries...")
print("Sweet! All libraries should be installed.",
"Just to confirm, I'm double checking if needed libraries are installed.", sep="\n")
try:
import colorama
logger.info("Colorama installed successfully.")
except ImportError:
logger.warn("colorama was not installed successfully.")
print("Hmm...Colorama didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install colorama?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
colorama_lastresort = input("Input here: ").lower()
logger.debug("colorama_lastresort: %s" % colorama_lastresort)
if colorama_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install colorama`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install colorama"], shell=True)
try:
print("Attempting to reimport colorama.")
import colorama
print("Colorama is FINALLY installed!")
except:
print("Colorama still wasn't successfully installed.",
"Cannot continue without Colorama.",
"Try doing a manual install of Colorama with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of Colorama with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without Colorama.",
"Try and do a manual install of Colorama with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif colorama_lastresort == "no":
print("Not installing Colorama with a shell command.",
"Cannot continue without Colorama.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without Colorama.",
"Try installing Colorama with PIP.",
"Press enter to exit.")
input()
sys.exit()
try:
import geopy
logger.info("geopy installed successfully.")
except ImportError:
logger.warn("geopy was not installed successfully.")
print("Hmm...geopy didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install geopy?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
geopy_lastresort = input("Input here: ").lower()
logger.debug("geopy_lastresort: %s" % geopy_lastresort)
if geopy_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install geopy`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install geopy"], shell=True)
try:
print("Attempting to reimport geopy.")
import geopy
print("Geopy is FINALLY installed!")
except:
print("Geopy still wasn't successfully installed.",
"Cannot continue without geopy.",
"Try doing a manual install of geopy with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of geopy with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without geopy.",
"Try and do a manual install of geopy with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif geopy_lastresort == "no":
print("Not installing geopy with a shell command.",
"Cannot continue without geopy.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without geopy.",
"Try installing geopy with PIP.",
"Press enter to exit.")
input()
sys.exit()
# Why is appJar not here? When appJar is straight up imported in a non-GUI environment, it'll throw an error
# even when it's installed. I don't check for an install because of this reason.
try:
import requests
logger.info("requests installed successfully.")
except ImportError:
logger.warning("Requests was not installed successfully.")
print("Hmm...requests didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install requests?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
requests_lastresort = input("Input here: ").lower()
logger.debug("requests_lastresort: %s" % requests_lastresort)
if requests_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install requests`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install requests"], shell=True)
try:
# Fun fact: This is inside THREE try/except things.
print("Attempting to reimport requests.")
import requests
print("requests is FINALLY installed!")
except:
print("requests still wasn't successfully installed.",
"Cannot continue without requests.",
"Try doing a manual install of requests with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of requests with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without appJar.",
"Try and do a manual install of requests with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif requests_lastresort == "no":
print("Not installing appJar with a shell command.",
"Cannot continue without requests.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without requests.",
"Try installing requests with PIP.",
"Press enter to exit.")
input()
sys.exit()
try:
import halo
logger.info("Halo installed successfully.")
except ImportError:
logger.warn("halo was not installed successfully.")
print("Hmm...Halo didn't install properly.")
printException()
print("As a last resort, we can use sudo -H to install packages.",
"Do you want to use the shell option to install halo?",
"WARNING: Using the last-resort method may screw up PIP, and",
"may require you to reinstall PIP on your machine."
"Yes or No.", sep="\n")
halo_lastresort = input("Input here: ").lower()
logger.debug("halo_lastresort: %s" % halo_lastresort)
if halo_lastresort == "yes":
try:
print("Now executing `sudo -H pip3 install halo`.",
"Please enter the password for sudo when the prompt",
"comes up. Press Control + C to cancel.",
"Starting in 5 seconds...", sep="\n")
time.sleep(5)
try:
subprocess.call(["sudo -H pip3 install halo"], shell=True)
try:
print("Attempting to reimport halo.")
                    import halo
print("Halo is now installed!")
except:
print("Halo still wasn't successfully installed.",
"Cannot continue without Halo.",
"Try doing a manual install of Halo with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except:
print("When running the command, an error occurred",
"Try doing a manual install of Halo with PIP.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
except KeyboardInterrupt:
print("Command execution aborted.",
"Cannot continue without Halo.",
"Try and do a manual install of Halo with PIP",
"in a command line.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
elif halo_lastresort == "no":
print("Not installing Halo with a shell command.",
"Cannot continue without Halo.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
else:
print("Did not understand your input. Defaulting to not installing",
"via the shell. Cannot continue without Halo.",
"Try installing Halo with PIP.",
"Press enter to exit.")
input()
sys.exit()
print("","All libraries are installed!", sep="\n")
else:
logger.warn("Input was not understood. Closing...")
print("Your input wasn't understood for if you wanted to automatically import libraries.",
"As a precaution PyWeather Setup needs to now close. Press enter to exit.", sep="\n")
input()
sys.exit()
# Previously this updated all your pip packages. I then did this on my NAS (on FreeNAS 11).
# It broke my NAS! Woo hoo!
print("", "Would you like PyWeather to automatically update it's required packages?",
"Doing this is generally recommended, and will have benefits down the line when",
"some libraries fix known issues that occur in PyWeather. Yes or No.", sep="\n")
confirm_updatepip = input("Input here: ").lower()
logger.debug("confirm_updatepip: %s" % confirm_updatepip)
if confirm_updatepip == "yes":
print("")
print("Updating PIP packages.")
totalpackages = 5
updatecount = 1
pip_requiredlibraries = ['requests', 'halo', 'appjar', 'colorama', 'geopy']
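    # Upgrade each required package in turn, reporting progress as update X/5.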
for pkgname in pip_requiredlibraries:
print("Now updating package: %s (Update %s/%s)" %
(pkgname, updatecount, totalpackages))
pip.main(['install', '--upgrade', '%s' % pkgname])
updatecount = updatecount + 1
elif confirm_updatepip == "no":
print("Not updating PIP packages. You may run into issues with non-updated",
"packages in future versions of PyWeather.")
else:
print("Input not understood, not updating PIP packages. You may run into",
"issues with non-updated packages in future versions of PyWeather.")
# Verbosity is not needed here.
print("I'm now going to guide you through obtaining an API key.",
"Please carefully read my detailed instructions, so you don't mess anything up.", sep="\n")
print("","If you know how to acquire a Wunderground API key, or are resetting PyWeather,",
"hit enter 14 times to get to the API key entry.", sep="\n")
print("Let's begin.",
"Start by opening a web browser, and going to https://www.wunderground.com/weather/api/.",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the 'Explore my options' button.",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the small button next to 'ANVIL PLAN'.",
"After that, confirm that the total underneath the 'Purchase Key' button says",
"'$0 USD per month'.",
"If the total underneath the 'Purchase Key' button doesn't",
"say '$0 USD per month, please ensure that the small button next to 'Developer'",
"on the table in the middle of the screen is selected, and the total",
"says '$0 USD per month'",
"Press any key when you are done.", sep="\n")
input()
print("Next, click the 'Purchase Key' button.",
"Press any key when you are done.", sep="\n")
input()
print("Next, input your email, and a password to sign up for a Weather",
"Underground account.",
"Be sure to select the checkbox next to 'I agree to the Terms of Service'",
"It's best if you leave the checkbox next to 'I would like to receive WU",
"updates via email' unchecked.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, press the 'Sign up for free' button.",
"When the welcome window pops up, be sure to click the X button at the top right of the popup.",
"When clicking the X, you should be redirected to wunderground.com.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, click 'My Profile' at the top right corner of the homepage.",
"In the dropdown, click 'My Email & Text Alerts'",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, next to your email listed on the page, click the 'Edit / Verify' button.",
"After you click the button, click the 'Verify Email' button.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, check your email in which you signed up with.",
"If you got a letter from Weather Underground, titled 'Daily Forecast",
"Email Verification', open that letter, and click the link.",
"If you didn't get the letter, wait a few minutes, and be sure to check your spam folder.",
"Hint: If you followed this guide exactly, WU will not be sending you daily forecasts to your email.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Your email should be verified.",
"Next, in your web browser, head back to https://www.wunderground.com/weather/api/.",
"Then, click the 'Explore my Options' button, again.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, at the top of the page, make sure the button next to 'ANVIL PLAN'",
"is selected.",
"After that, confirm that the total underneath the 'Purchase Key' button says",
"'$0 USD per month'",
"If the total doesn't say that, in the pricing table, make sure the button",
"next to 'Developer' is selected.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Next, click the 'Purchase Key' button, on top of your total (which",
"should be $0 USD per month)",
"Next, fill out the form, considering these tips:",
"For the contact name/email, it's recommended you use your real name",
"(first name last initial is fine).",
"It's also recommended that you use your real email.",
"For the project name, put in something generic, like 'to use a script that",
"uses WU's API', or 'WU API test'. It's up to you.",
"For the project website, put in something generic, like 'google.com', or",
"some other site you feel like having as the project site.",
"For the question 'Where will the API be used', answer Other.",
"For the question 'Will the API be used for commercial use?', answer No.",
"For the question 'Will the API be used for manufacturing mobile chip",
"processing?', answer No.",
"Answer yes if you somehow are manufacturing mobile chip processing. I doubt",
"you are, however.",
"For the country that you are based in, put your location.",
"Before we move on, fill out these forms, and press any key when you are done "
"and ready.", sep="\n")
input()
print("Next, for the brief description, put something like 'using an API key",
"to use a script using Wunderground'.",
"After that, check both boxes at the bottom of the page. Read the ToS if you",
"feel like it.",
"Finally, click 'Purchase Key'.",
"You should land on a page that says 'Edit API Key'.",
"Press any key when you are done and ready.", sep="\n")
input()
print("In the table to the left of the page, copy the text that's under Key ID.",
"(Ctrl+C, right click)",
"I'm now going to ask you to input the API key into the text entry below.",
"The API key will be saved to storage/apikey.txt, so PyWeather can easily",
"pull it up.",
"Press any key when you are done and ready.", sep="\n")
input()
print("Please input your API key below.")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s" % apikey_input)
print("", "Just to confirm, the API key you gave me was: " + apikey_input
+ ".", sep="\n")
print("Please double check your input, and confirm in the dialogue below.")
apikey_confirm = input("Is the API key right? Yes or no: ").lower()
logger.debug("apikey_confirm: %s" % apikey_confirm)
if apikey_confirm == "no":
while True:
logger.debug("User now re-entering key...")
print("","Please input your API key below.", sep="\n")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s" % apikey_input)
print("Just to confirm, the API key you gave me was: " + apikey_input
+ ".")
apikey_confirm = input("Is the API key right? Yes or no: ").lower()
if apikey_confirm == "yes":
break
elif apikey_confirm == "no":
continue
        else:
            print("Couldn't understand your input.",
                  "I'll assume the API key is correct, moving on.", sep="\n")
            break
print("Now saving your API key...")
open('storage//apikey.txt', 'w').close()
with open("storage//apikey.txt", 'a') as out:
logger.debug("out: %s" % out)
out.write(apikey_input)
out.close()
logger.debug("Performed ops: overwrite apikey.txt, out.write(apikey_input), out.close()")
print("", "I can also back up your API key, in case you do something wrong.",
sep="\n")
# A future release should bring customization as to the storage location.
print("Would you like me to save a backup? Yes or no.")
backup_APIkey = input("Input here: ").lower()
if backup_APIkey == "yes":
print("","Where would you want me to backup the key to?",
"This is a directory. If I wanted my key at directory/backkey.txt,",
"You would enter 'directory'. The default directory is 'backup'.", sep="\n")
# Doing a .lower() here to prevent case insensitiveness.
backup_APIkeydirectory = input("Input here: ").lower()
folder_argument = backup_APIkeydirectory + "//backkey.txt"
backup_APIkeydirectory2 = backup_APIkeydirectory + "//"
logger.debug("backup_APIkeydirectory: %s ; backup_APIkeydirectory2: %s" %
(backup_APIkeydirectory, backup_APIkeydirectory2))
logger.debug("folder_argument: %s" % folder_argument)
# These two variables will get reset if the directory is backup, or empty.
if backup_APIkeydirectory == "backup" or backup_APIkeydirectory == "":
print("Using the default directory of //backup.")
folder_argument = "backup//backkey.txt"
backup_APIkeydirectory2 = "backup//"
logger.debug("folder_argument: %s ; backup_APIkeydirectory2: %s" %
(folder_argument, backup_APIkeydirectory2))
elif backup_APIkeydirectory != "backup":
try:
os.mkdir(backup_APIkeydirectory2)
except:
printException_loggerwarn()
print("Couldn't make the directory, does it exist?")
# Overwrite the file, if it exists.
    with open(folder_argument, 'w') as backupout:
        backupout.write(apikey_input)
config['KEYBACKUP']['savedirectory'] = backup_APIkeydirectory2
print("The API key was backed up successfully!")
logger.debug("Performed 3 ops. Overwrite "+ folder_argument + "backkey.txt, write to backkey.txt" +
", and close backkey.txt.")
print("", "Before we configure PyWeather, I'll now validate your API key.", sep="\n")
# Do an infinite loop of validation of the API key, so the user can reenter the API key
# if it was wrong.
while True:
apitest_URL = 'http://api.wunderground.com/api/' + apikey_input + '/conditions/q/NY/New_York.json'
testreader = codecs.getreader("utf-8")
logger.debug("apitest_URL: %s ; testreader: %s" %
(apitest_URL, testreader))
try:
testJSON = requests.get(apitest_URL)
logger.debug("testJSON: %s" % testJSON)
except:
logger.warn("Couldn't connect to Wunderground's API! No internet?")
print("When PyWeather Setup attempted to fetch the .json to validate your API key,",
"it ran into an error. If you're on a network with a filter, make sure that",
"'api.wunderground.com' is unblocked. Otherwise, make sure you have an internet",
"connection.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
test_json = json.loads(testJSON.text)
if jsonVerbosity == True:
logger.debug("test_json: %s" % test_json)
try:
test_conditions = str(test_json['current_observation']['temp_f'])
logger.debug("test_conditions: %s" % test_conditions)
print("Hurray! Your API key is valid and works.")
break
except:
logger.warn("Error! Is the API key invalid?")
print("When attempting to validate the API key that you entered/confirmed,",
"PyWeather ran into an error. Would you like to reenter your API key to revalidate it?",
"Please note, that this error might be caused by WU's API being down, or another cause.",
"However, 90% of the time, this is due to a bad API key.",
"Yes or No.", sep='\n')
revalidateAPIkey = input("Input here: ").lower()
if revalidateAPIkey == "yes":
print("Enter in your API key below.")
apikey_input = input("Input here: ")
logger.debug("apikey_input: %s")
print("Revalidating your API key...")
continue
elif revalidateAPIkey == "no":
print("Not revalidating your API key. You'll need a valid API key to continue.",
"Press enter to exit.", sep="\n")
input()
sys.exit()
printException()
print("Press enter to exit.")
input()
sys.exit()
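# The API key is saved and validated -- walk through each configuration question
# and store the answers in the config object as we go.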
print("Let's configure PyWeather to your liking.")
logger.debug("config: %s" % config)
print("", "(1/42)","On the summary screen, would you like to show sunrise/sunset times?",
"By default, this is disabled.",
"Yes or No.", sep="\n")
sundata_Summary = input("Input here: ").lower()
logger.debug("sundata_Summary: %s" % sundata_Summary)
if sundata_Summary == "yes":
config['SUMMARY']['sundata_summary'] = 'True'
print("Changes saved.")
logger.debug("Sundata on the summary is now ENABLED.")
elif sundata_Summary == "no":
config['SUMMARY']['sundata_summary'] = 'False'
print("Changes saved.")
logger.debug("Sundata on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['SUMMARY']['sundata_summary'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(2/42)","On the summary screen, would you like to show almanac data?",
"By default, this is disabled.",
"Yes or no:", sep="\n")
almanacdata_Summary = input("Input here: ").lower()
logger.debug("almanacdata_Summary: %s" % almanacdata_Summary)
if almanacdata_Summary == "yes":
config['SUMMARY']['almanac_summary'] = 'True'
print("Changes saved.")
logger.debug("Almanac on the summary is now ENABLED.")
elif almanacdata_Summary == "no":
config['SUMMARY']['almanac_summary'] = 'False'
print("Changes saved.")
logger.debug("Almanac on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['SUMMARY']['almanac_summary'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(3/42)", "On the summary screen, would you like to show alerts data?",
"By default, this is enabled. Please note, Wunderground",
"only supports alert data in the US and EU at this time.",
"Yes or No.", sep="\n")
alertsdata_Summary = input("Input here: ").lower()
logger.debug("alertsdata_Summary: %s" % alertsdata_Summary)
if alertsdata_Summary == "yes":
config['SUMMARY']['showalertsonsummary'] = 'True'
print("Changes saved.")
logger.debug("Alerts on the summary is now ENABLED.")
elif alertsdata_Summary == "no":
config['SUMMARY']['showalertsonsummary'] = 'False'
print("Changes saved.")
logger.debug("Alerts on the summary is now DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'", sep="\n")
    config['SUMMARY']['showalertsonsummary'] = 'True'
    print("Changes saved.")
    logger.debug("Could not recognize input. Defaulting to ENABLED.")
print("", "(4/42)","On boot, would you like PyWeather to check for updates?",
"By default, this is disabled, due to a load time increase of ~2-5 seconds.",
"Yes or No.", sep="\n")
checkForUpdates = input("Input here: ").lower()
logger.debug("checkForUpdates: %s" % checkForUpdates)
if checkForUpdates == "yes":
config['UPDATER']['autoCheckForUpdates'] = 'True'
print("Changes saved.")
logger.debug("Checking for updates on startup is ENABLED.")
elif checkForUpdates == "no":
config['UPDATER']['autoCheckForUpdates'] = 'False'
print("Changes saved.")
logger.debug("Checking for updates on startup is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['UPDATER']['autoCheckForUpdates'] = 'False'
print("Changes saved.")
logger.debug("Could not recognize input. Defaulting to DISABLED.")
print("", "(5/42)","When an error occurs, would you like PyWeather to show the full error?",
"When enabled, you'll have easier access to the full error for reporting",
"the bug on GitHub.",
"By default, this is disabled, as errors look less pretty when enabled.",
"Yes or no.", sep="\n")
displayTracebacks = input("Input here: ").lower()
logger.debug("displayTracebacks: %s" % displayTracebacks)
if displayTracebacks == "yes":
config['TRACEBACK']['tracebacks'] = 'True'
config['TRACEBACK']['setup_tracebacks'] = 'True'
config['TRACEBACK']['updater_tracebacks'] = 'True'
config['TRACEBACK']['keybackup_tracebacks'] = 'True'
config['TRACEBACK']['configdefault_tracebacks'] = 'True'
print("Changes saved.")
logger.debug("Printing tracebacks is ENABLED.")
elif displayTracebacks == "no":
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['keybackup_tracebacks'] = 'False'
config['TRACEBACK']['configdefault_tracebacks'] = 'False'
print("Changes saved.")
logger.debug("Printing tracebacks is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to 'False'", sep="\n")
config['TRACEBACK']['tracebacks'] = 'False'
config['TRACEBACK']['setup_tracebacks'] = 'False'
config['TRACEBACK']['updater_tracebacks'] = 'False'
config['TRACEBACK']['keybackup_tracebacks'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(6/42)", "When booting PyWeather up initially, would you like PyWeather to",
"fetch the 10-day hourly forecast, instead of the 3-day forecast?",
"This is disabled by default. When enabled, initial loading times are",
"increased. However, when you view the 10-day hourly forecast, you won't",
"have to wait for it to load, and use another API call.",
"Yes or No.", sep="\n")
tenday_onboot = input("Input here: ").lower()
if tenday_onboot == "yes":
config['PREFETCH']['10dayfetch_atboot'] = 'True'
print("Changes saved.")
logger.debug("Fetching 10 day JSON at boot is ENABLED.")
elif tenday_onboot == "no":
config['PREFETCH']['10dayfetch_atboot'] = 'False'
print("Changes saved.")
logger.debug("Fetching 10 day JSON at boot is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to the default value 'False'", sep="\n")
config['PREFETCH']['10dayfetch_atboot'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(7/42)", "When viewing detailed hourly, 10-day hourly, and historical hourly,",
"detailed information, how many iterations should PyWeather go through",
"before asking you to continue?",
"By default, this is 6. An input above 10",
"is not recommended.", sep="\n")
detailedloops = input("Input here: ")
try:
detailedloops = int(detailedloops)
detailedloops = str(detailedloops)
config['UI']['detailedinfoloops'] = detailedloops
print("Changes saved.")
logger.debug("Detailed info iterations now %s." % detailedloops)
except:
print("Couldn't convert input into a number. Defaulting to '6'.")
printException_loggerwarn()
config['UI']['detailedinfoloops'] = '6'
print("Changes saved.")
logger.debug("Detailed info loops now 6.")
print("", "(8/42)", "When viewing detailed 10-day forecast information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 5. An input above 10 will not prompt",
"the enter to continue prompt", sep="\n")
detailedForecastLoops = input("Input here: ")
try:
detailedForecastLoops = int(detailedForecastLoops)
detailedForecastLoops = str(detailedForecastLoops)
config['UI']['forecast_detailedinfoloops'] = detailedForecastLoops
print("Changes saved.")
logger.debug("Detailed forecast info iterations now %s" % detailedForecastLoops)
except:
print("Couldn't convert input into a number. Defaulting to '5'.")
printException_loggerwarn()
config['UI']['forecast_detailedinfoloops'] = '5'
print("Changes saved.")
logger.debug("Detailed forecast info loops now 5.")
print("", "(9/42)", "PyWeather has a caching system, in which if you're gone for some time",
"data will automatically refresh. Would you like to turn this on?",
"This is enabled by default. Yes or No.", sep="\n")
enablecache = input("Input here: ").lower()
if enablecache == "no":
print("Cache will be disabled.")
config['CACHE']['enabled'] = 'False'
print("Changes saved.")
else:
config['CACHE']['enabled'] = 'True'
print("You entered yes, or your input wasn't understood (yes is the default.)",
"In the next few inputs, enter the time in minutes that PyWeather should keep",
"certain types of data, before a data refresh is automatically requested.",
"If you want to leave cache values to their defaults, press enter at any prompt.", sep="\n")
print("", "(10/42)", "Please enter the cache time for alerts data in minutes (default = 5)", sep="\n")
alertscachetime = input("Input here: ").lower()
try:
alertscachetime = float(alertscachetime)
alertscachetime = str(alertscachetime)
config['CACHE']['alerts_cachedtime'] = alertscachetime
print("Changes saved.")
logger.debug("Alerts cache time now %s minutes." % alertscachetime)
except:
print("", "Your input couldn't be converted into a number. Setting alerts",
"cache time to it's default value of '5'.", sep="\n")
config['CACHE']['alerts_cachedtime'] = '5'
logger.debug("Alerts cache time now 5 minutes.")
print("", "(11/42)", "Please enter the cache time for current data in minutes (default = 10)", sep="\n")
currentcachetime = input("Input here: ").lower()
try:
currentcachetime = float(currentcachetime)
currentcachetime = str(currentcachetime)
config['CACHE']['current_cachedtime'] = currentcachetime
print("Changes saved.")
logger.debug("Current cache time now %s minutes." % alertscachetime)
except:
print("", "Your input couldn't be converted into a number. Setting current",
"cache time to it's default value of '10'.", sep="\n")
config['CACHE']['current_cachedtime'] = '10'
logger.debug("Current cache time now 10 minutes.")
print("", "(12/42)", "Please enter the cache time for forecast data in minutes (default = 60)", sep="\n")
forecastcachetime = input("Input here: ").lower()
try:
forecastcachetime = float(forecastcachetime)
forecastcachetime = str(forecastcachetime)
config['CACHE']['forecast_cachedtime'] = forecastcachetime
print("Changes saved.")
logger.debug("Forecast cache time now %s minutes." % forecastcachetime)
except:
print("", "Your input couldn't be converted into a number. Setting forecast",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['forecast_cachedtime'] = '60'
logger.debug("Forecast cache time now 60 minutes.")
print("", "(13/42)", "Please enter the cache time for almanac data in minutes (default = 240)", sep="\n")
almanaccachetime = input("Input here: ").lower()
try:
almanaccachetime = float(almanaccachetime)
almanaccachetime = str(almanaccachetime)
config['CACHE']['almanac_cachedtime'] = almanaccachetime
print("Changes saved.")
logger.debug("Almanac cache time now %s minutes." % almanaccachetime)
except:
print("", "Your input couldn't be converted into a number. Setting almanac",
"cache time to it's default value of '240'.", sep="\n")
config['CACHE']['almanac_cachedtime'] = '240'
logger.debug("Almanac cache time now 240 minutes.")
print("", "(14/42)", "Please enter the cache time for 1.5 day hourly data in minutes (default = 60)", sep="\n")
threedayhourly_cachedtime = input("Input here: ").lower()
try:
        threedayhourly = float(threedayhourly_cachedtime)
        threedayhourly = str(threedayhourly)
        config['CACHE']['threedayhourly_cachedtime'] = threedayhourly
print("Changes saved.")
logger.debug("3 day hourly cache time now %s minutes." % threedayhourly_cachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting three day hourly",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['threedayhourly_cachedtime'] = "60"
logger.debug("3 day hourly cache time now 60 minutes")
print("", "(15/42)", "Please enter the cache time for the ten day hourly data in minutes (default = 60)", sep="\n")
tendayhourly_cachedtime = input("Input here: ").lower()
try:
        tendayhourly = float(tendayhourly_cachedtime)
        tendayhourly = str(tendayhourly)
        config['CACHE']['tendayhourly_cachedtime'] = tendayhourly
print("Changes saved.")
logger.debug("10 day hourly cache time now %s minutes." % tendayhourly_cachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting ten day hourly",
"cache time to it's default value of '60'.", sep="\n")
config['CACHE']['tendayhourly_cachedtime'] = "60"
logger.debug("10 day hourly cache time now 60 minutes")
print("", "(16/42)", "Please enter the cache time for sun data in minutes (default = 480)", sep="\n")
sundatacachetime = input("Input here: ").lower()
try:
sundatacachetime = float(sundatacachetime)
sundatacachetime = str(sundatacachetime)
        config['CACHE']['sundata_cachedtime'] = sundatacachetime
print("Changes saved.")
logger.debug("Sun data cache time now %s minutes." % sundatacachetime)
except:
print("", "Your input couldn't be converted into a number. Setting sun data",
"cache time to it's default value of '480'.", sep="\n")
config['CACHE']['sundata_cachedtime'] = '480'
logger.debug("Sun data cache time now 480 minutes.")
print("", "(17/42)", "Please enter the cache time for tide data in minutes (default = 480)", sep="\n")
tidecachetime = input("Input here: ").lower()
try:
tidecachetime = float(tidecachetime)
tidecachetime = str(tidecachetime)
config['CACHE']['tide_cachedtime'] = tidecachetime
print("Changes saved.")
logger.debug("Tide cache time now %s minutes." % tidecachetime)
except:
print("", "Your input couldn't be converted into a number. Setting tide data",
"cache time to it's default value of '480'.", sep="\n")
config['CACHE']['tide_cachedtime'] = '480'
logger.debug("Tide data cache time now 480 minutes.")
print("", "(18/42)", "Please enter the cache time for hurricane data in minutes (default = 480)", sep="\n")
hurricanecachetime = input("Input here: ").lower()
try:
hurricanecachetime = float(hurricanecachetime)
hurricanecachetime = str(hurricanecachetime)
config['CACHE']['hurricane_cachedtime'] = hurricanecachetime
print("Changes saved.")
logger.debug("Hurricane cache time now %s minutes" % hurricanecachetime)
except:
print("", "Your input couldn't be converted into a number. Setting hurricane data",
"cache time to it's default value of '180'.", sep="\n")
config['CACHE']['hurricane_cachedtime'] = '180'
logger.debug("Hurricane data cache time now 180 minutes.")
print("", "(19/42)", "Please enter the cache time for yesterday's weather data in minutes (default = 720)", sep="\n")
yesterdaycachedtime = input("Input here: ").lower()
try:
yesterdaycachedtime = float(yesterdaycachedtime)
yesterdaycachedtime = str(yesterdaycachedtime)
config['CACHE']['yesterday_cachedtime'] = yesterdaycachedtime
print("Changes saved.")
logger.debug("Yesterday cache time now %s minutess" % yesterdaycachedtime)
except:
print("", "Your input couldn't be converted into a number. Setting yesterday's weather data",
"cache time to it's default value of 720.", sep="\n")
config['CACHE']['yesterday_cachedtime'] = '720'
logger.debug("Yesterday data cache time now 720 minutes.")
print("", "(20/42)", "When viewing detailed EU alerts information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 2.", sep="\n")
EUalertsloops = input("Input here: ")
try:
EUalertsloops = int(EUalertsloops)
EUalertsloops = str(EUalertsloops)
config['UI']['alerts_EUiterations'] = EUalertsloops
print("Changes saved.")
logger.debug("Detailed EU alert iterations now %s" % EUalertsloops)
except:
print("Couldn't convert input into a number. Defaulting to '2'.")
printException_loggerwarn()
config['UI']['alerts_EUiterations'] = '2'
print("Changes saved.")
logger.debug("Detailed EU alert iterations now 2.")
print("", "(21/42)", "When viewing detailed US alerts information, how many",
"iterations should PyWeather go through, before asking you to",
"continue?",
"By default, this is 1.", sep="\n")
USalertsloops = input("Input here: ")
try:
USalertsloops = int(USalertsloops)
USalertsloops = str(USalertsloops)
config['UI']['alerts_USiterations'] = USalertsloops
print("Changes saved.")
logger.debug("Detailed US alert iterations now %s" % USalertsloops)
except:
print("Couldn't convert input to a number. Defaulting to '1'.")
printException_loggerwarn()
config['UI']['alerts_USiterations'] = '1'
print("Changes saved.")
logger.debug("Detailed US alert iterations now 1.")
print("", "(22/42)","When PyWeather is going through detailed information, it can show",
"how many iterations are completed.",
"By default, this is disabled.",
"Yes or No.", sep="\n")
showIterations = input("Input here: ").lower()
if showIterations == "yes":
config['UI']['show_completediterations'] = 'True'
print("Changes saved.")
logger.debug("Showing completed iterations is ENABLED.")
elif showIterations == "no":
config['UI']['show_completediterations'] = 'False'
print("Changes saved.")
logger.debug("Showing completed iterations is DISABLED.")
else:
print("Couldn't understand what you inputted.",
"Defaulting to 'FALSE'.", sep="\n")
config['UI']['show_completediterations'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(23/42)", "When PyWeather is going through detailed information, would",
"you like the 'Enter to Continue' prompts to pop up?",
"By default, this is enabled.",
"Yes or No.", sep="\n")
showEnterToContinue = input("Input here: ").lower()
if showEnterToContinue == "yes":
config['UI']['show_entertocontinue'] = 'True'
print("Changes saved.")
logger.debug("Showing enter to continue prompts is ENABLED.")
elif showEnterToContinue == "no":
config['UI']['show_entertocontinue'] = 'False'
print("Changes saved.")
logger.debug("Showing enter to continue prompts is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'.", sep="\n")
config['UI']['show_entertocontinue'] = 'True'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to ENABLED.")
print("", "(24/42)", "In the PyWeather Updater, the updater can show the release tag",
"associated with the latest release. Helpful for those using Git to",
"update PyWeather. By default, this is disabled.",
"Yes or No.", sep="\n")
showReleaseTag = input("Input here: ").lower()
if showReleaseTag == "yes":
config['UPDATER']['show_updaterreleasetag'] = 'True'
print("Changes saved.")
logger.debug("Showing release tag in updater is ENABLED.")
elif showReleaseTag == "no":
config['UPDATER']['show_updaterreleasetag'] = 'False'
print("Changes saved.")
logger.debug("Showing release tag in updater is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'False'.", sep="\n")
config['UPDATER']['show_updaterreleasetag'] = 'False'
print("Changes saved.")
logger.debug("Could not understand input. Defaulting to DISABLED.")
print("", "(25/42)", "When PyWeather boots, it can validate your API key. If PyWeather",
"finds your primary API key is invalid, it'll attempt to validate your",
"backup key, and load that if it's validated successfully.",
"By default, this is enabled, as it's well worth the 1 API call to make",
"sure your key is valid. However, if you said 'Yes' to almanac/sun data",
"on the summary screen, you might not want to enable this.",
"Yes or No.", sep="\n")
validateKeyOnBoot = input("Input here: ").lower()
if validateKeyOnBoot == "yes":
config['PYWEATHER BOOT']['validateAPIKey'] = 'True'
print("Changes saved.")
logger.debug("Validating API key on boot is ENABLED.")
elif validateKeyOnBoot == "no":
config['PYWEATHER BOOT']['validateAPIKey'] = 'False'
print("Changes saved.")
logger.debug("Validating API key on boot is DISABLED.")
else:
print("Could not understand what you inputted.",
"Defaulting to 'True'.", sep="\n")
        config['PYWEATHER BOOT']['validateAPIKey'] = 'True'
        print("Changes saved.")
logger.debug("Could not understand input. Defaulting to ENABLED.")
print("", "(26/42)", "PyWeather now has a radar feature, which opens up a GUI on supported",
"platforms. Depending on your screen resolution, you'll have to set how large",
"the radar picture is when rendered. In the prompt below, enter one of five sizes.",
"extrasmall - 320x240 window",
"small - 480x320 window",
"normal - 640x480 window",
"large - 960x720 window",
"extralarge - 1280x960 window",
"By default, the resolution is normal. Adapt your choice to the screen resolution",
"of the machine you're using.", sep="\n")
radar_resolutions = ["extrasmall", "small", "normal", "large", "extralarge"]
logger.debug("radar_resolutions: %s" % radar_resolutions)
radar_resolutioninput = input("Input here: ").lower()
for x in range(0, 5):
if radar_resolutioninput == radar_resolutions[x]:
logger.debug("Resolution input matched, end result: %s" % radar_resolutions[x])
config['RADAR GUI']['radar_imagesize'] = radar_resolutions[x]
print("Changes saved.")
break
# This works by design. If x = 4 (extralarge), the if would catch first.
        elif x == 4:
print("Could not understand what you inputted. Defaulting to 'normal'.")
config['RADAR GUI']['radar_imagesize'] = 'normal'
print("Changes saved.")
print("", "(27/42)", "PyWeather's radar feature is unfortunately experimental as of PyWeather 0.6.3 beta.",
"By default, a confirmation message will always appear when attempting to launch the radar.",
"However, this can be turned off, if you plan to use the experimental radar on a regular basis.",
"By default, bypassing the confirmation message is disabled. Yes or No.", sep="\n")
radar_bypassconfinput = input("Input here: ").lower()
logger.debug("radar_bypassconfinput: %s" % radar_bypassconfinput)
if radar_bypassconfinput == "yes":
config['RADAR GUI']['bypassconfirmation'] = 'True'
logger.debug("RADAR GUI/bypassconfirmation is now TRUE")
print("Changes saved.")
elif radar_bypassconfinput == "no":
config['RADAR GUI']['bypassconfirmation'] = 'False'
logger.debug("RADAR GUI/bypassconfirmation is now FALSE")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['RADAR GUI']['bypassconfirmation'] = 'False'
logger.debug("RADAR GUI/bypassconfirmation is now FALSE")
print("Changes saved.")
print("", "(28/42)", "On the summary screen, would you like tide data to be shown?",
"This uses an extra API call when enabled. By default, this is disabled.",
"Yes or No.", sep="\n")
tideonsummary = input("Input here: ").lower()
logger.debug("tideonsummary: %s" % tideonsummary)
if tideonsummary == "yes":
config['SUMMARY']['showtideonsummary'] = "True"
logger.debug("SUMMARY/showtideonsummary is now TRUE")
print("Changes saved.")
elif tideonsummary == "no":
config['SUMMARY']['showtideonsummary'] = "False"
logger.debug("SUMMARY/showtideonsummary is now FALSE")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['SUMMARY']['showtideonsummary'] = "False"
logger.debug("SUMMARY/showtideonsummary is now FALSE")
print("Changes saved.")
print("", "(29/42)", "When PyWeather boots, would you like hurricane data to be fetched?",
"Initial loading times will increase when this is on, but hurricane data will load faster.",
"This can use an extra API call, especially when you fetch hurricane data but don't check it",
"in PyWeather. By default, this is disabled.",
"Yes or No.", sep="\n")
hurricaneprefetch = input("Input here: ").lower()
logger.debug("hurricaneprefetch: %s" % hurricaneprefetch)
if hurricaneprefetch == "yes":
config['PREFETCH']['hurricanedata_atboot'] = 'True'
logger.debug("PREFETCH/hurricanedata_atbooot is now TRUE.")
print("Changes saved.")
elif hurricaneprefetch == "no":
config['PREFETCH']['hurricanedata_atboot'] = 'False'
logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['PREFETCH']['hurricanedata_atboot'] = 'False'
logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.")
print("Changes saved.")
print("", "(30/42)", "PyWeather has a new feature where you can now easily call your current location at boot.",
"The current location feature allows you to enter 'currentlocation' at boot, and view the weather for your",
"approximate location. However, GeoIP lookups might be inaccurate, especially for mobile users. The GeoIP service",
"uses freegeoip.net. Would you like to enable this service? By default, this is disabled. Yes or No.", sep="\n")
allowgeoipservice = input("Input here: ").lower()
logger.debug("allowgeoipservice: %s" % allowgeoipservice)
if allowgeoipservice == "yes":
config['FIRSTINPUT']['geoipservice_enabled'] = 'True'
logger.debug("FIRSTINPUT/geoipservice_enabled is now TRUE.")
print("Changes saved.")
elif allowgeoipservice == "no":
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.")
else:
print("Could not understand what you inputted. Defaulting to 'False'.")
config['FIRSTINPUT']['geoipservice_enabled'] = 'False'
logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.")
print("Changes saved.")
print("", "(31/42)", "PyWeather has a new feature where you can query indivdiual Wunderground PWS stations.",
"You can query any PWS globally by entering pws:<pws ID> when enabled, and where <pws ID> is the ID of the",
"PWS you want to query. However, this can be turned off if you don't want to have extra lines of text at boot,",
"or don't want the ability to query PWSes. By default, this is enabled. Yes or No.", sep="\n")
allowpwsqueries = input("Input here: ").lower()
logger.debug("allowpwsqueries: %s" % allowpwsqueries)
if allowpwsqueries == "yes":
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.")
print("Changes saved.")
elif allowpwsqueries == "no":
config['FIRSTINPUT']['allow_pwsqueries'] = 'False'
logger.debug("FIRSTINPUT/allow_pwsqueries is now FALSE.")
print("Changes saved.")
else:
print("Could not understand what you inputted. Defaulting to 'True'.")
config['FIRSTINPUT']['allow_pwsqueries'] = 'True'
logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.")
print("Changes saved.")
print("", "(32/42)", "PyWeather has a new feature where in hurricane data, you can see the nearest city that a hurricane is to.",
"However, this feature uses a separate API (geonames.org), can only work when the hurricane is within 300km of a city,",
"and will drastically increase loading times. You may also run into issues with the default API key hitting rate limits.",
"Despite all of this, would you like to enable the nearest city features for non-forecast hurricane data?",
"Yes or No. By default, this is disabled.", sep="\n")
allownearestcities = input("Input here: ").lower()
logger.debug("allownearestcities: %s" % allownearestcities)
if allownearestcities == "yes":
additional_ncoptions = True
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
config['HURRICANE']['enablenearestcity'] = 'True'
logger.debug("HURRICANE/enablenearestcity is now TRUE.")
print("Changes saved.")
elif allownearestcities == "no":
additional_ncoptions = False
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
config['HURRICANE']['enablenearestcity'] = 'False'
logger.debug("HURRICANE/enablenearestcity is now FALSE.")
print("Changes saved.")
else:
additional_ncoptions = False
logger.debug("additional_ncoptions: %s" % additional_ncoptions)
print("Could not understand what you inputted. Defaulting to 'False'.")
config['HURRICANE']['enablenearestcity'] = 'False'
logger.debug("HURRICANE/enablenearestcity is now FALSE.")
print("Changes saved.")
# <--- Additional options for nearest city feature --->
if additional_ncoptions is True:
print("", "(33/42)", "By default, the nearest city feature is only enabled on the current data screen of hurricane data.",
"You can enable the nearest city feature to be enabled on forecast data. However, loading hurricane data becomes much",
"slower. By default, this is disabled. Yes or No.", sep="\n")
enable_ncforecast = input("Input here: ").lower()
if enable_ncforecast == "yes":
config['HURRICANE']['enablenearestcity_forecast'] = 'True'
logger.debug("HURRICANE/enablenearestcity_forecast is now TRUE.")
print("Changes saved.")
elif enable_ncforecast == "no":
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'False'.")
config['HURRICANE']['enablenearestcity_forecast'] = 'False'
logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.")
print("Changes saved.")
print("", "(34/42)", "By default, PyWeather uses it's own API username for the nearest city features, which should be able to",
"handle PyWeather's user demands just fine. However, if you'd like to use your own account for the API, you may.",
"You can sign up at geonames.org, and follow all the steps. The confirmation letter may take some time to hit your inbox.",
"Would you like to define your own API username? Yes or No. By default, this is no.", sep="\n")
definegeonamesusername = input("Input here: ").lower()
logger.debug("definegeonamesusername: %s" % definegeonamesusername)
if definegeonamesusername == "yes":
# Enter into confirmation loop
while True:
print("Please enter the username that you'll use to access the geonames API.")
geonamesusername = input("Input here: ").lower()
logger.debug("geonamesusername: %s" % geonamesusername)
print("The API username you gave me was: %s" % geonamesusername,
"Is this the username that you'd like to use? Yes or No.",
"Please note that your username will not be validated.", sep="\n")
geonamesconfirmation = input("Input here: ").lower()
                confirmurl = 'http://api.geonames.org/findNearbyPlaceNameJSON?lat=19.3&lng=102.2&username=' + geonamesusername + '&radius=300&maxRows=1&cities=cities5000'
logger.debug("geonamesconfirmation: %s ; confirmurl: %s" %
(geonamesconfirmation, confirmurl))
if geonamesconfirmation == "yes":
config['HURRICANE']['api_username'] = geonamesusername
logger.debug("HURRICANE/api_username is now %s" % geonamesusername)
print("Changes saved.")
elif geonamesconfirmation == "no":
continue
else:
print("Input not understood. Will not validate username. If the username is",
"invalid, please change the HURRICANE/api_username option in the config.", sep="\n")
config['HURRICANE']['api_username'] = geonamesusername
logger.debug("HURRICANE/api_username is now %s" % geonamesusername)
print("Changes saved.")
elif definegeonamesusername == "no":
print("Defaulting to the default username for the geonames API.")
else:
print("Input not understood.",
"Defaulting to the default username for the geonames API.", sep="\n")
print("", "(35/42)", "For the nearest city feature, you can define how large a city has to be to show up as a nearest city.",
"You have three options for this. 'small' will set the threshold to cities with a 1,000 population and greater, but this",
"tends to include cities with very few or no people. 'medium' will set the threshold to cities with a 5,000 population",
"and greater, and 'large' for cities that have a population of 10,000 or greater. Please enter either 'small', 'medium'",
"or 'large' below. Default is 'medium'.", sep="\n")
nearestcitysize = input("Input here: ").lower()
logger.debug("nearestcitysize: %s" % nearestcitysize)
if nearestcitysize == "small":
config['HURRICANE']['nearestcitysize'] = 'small'
logger.debug("HURRICANE/nearestcitysize is now 'small'.")
print("Changes saved.")
elif nearestcitysize == "medium":
config['HURRICANE']['nearestcitysize'] = 'medium'
logger.debug("HURRICANE/nearestcitysize is now 'medium'")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'medium'.")
config['HURRICANE']['nearestcitysize'] = 'medium'
logger.debug("HURRICANE/nearestcitysize is now 'medium'.")
print("Changes saved.")
print("", "(36/42)", "PyWeather will now let you enable a favorite locations feature, which allows",
"you to quickly call up to 5 locations in PyWeather. You have the ability to configure your",
"favorite locations in a menu option in PyWeather. By default, this feature is enabled.",
"Yes or No.", sep="\n")
enable_favoritelocations = input("Input here: ").lower()
logger.debug("enable_favoritelocations: %s" % enable_favoritelocations)
if enable_favoritelocations == "yes":
config['FAVORITE LOCATIONS']['enabled'] = 'True'
logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
elif enable_favoritelocations == "no":
config['FAVORITE LOCATIONS']['enabled'] = 'False'
logger.debug("FAVORITE LOCATIONS/enabled is now 'False'.")
print("Changes saved!")
else:
print("Could not understand your input. Defaulting to 'True'.")
config['FAVORITE LOCATIONS']['enabled'] = 'True'
logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
print("", "(37/43)", "PyWeather can now store your previously searched locations.",
"You have the ability to configure your previous locations in a menu option",
"in PyWeather. By default this feature is enabled.",
"Yes or No.", sep="\n")
enable_previouslocations = input("Input here: ").lower()
logger.debug("enable_previouslocations: %s" % enable_previouslocations)
if enable_previouslocations == "yes":
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.")
print("Changes saved!")
elif enable_previouslocations == "no":
config['PREVIOUS LOCATIONS']['enabled'] = 'False'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'False'.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'True'.")
config['PREVIOUS LOCATIONS']['enabled'] = 'True'
logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.")
print("", "(37/42)", "PyWeather by default uses Google's geocoder, which can occasionally have rate limiting issues.",
"To get around this, you can manually use your own API key that you sign up for with Google. This is completely",
"optional, and you can continue past this step and not impede PyWeather's functionality. However, would you like",
"to enable the use of a custom API key for the geocoder? Yes or No.", sep="\n")
enablecustomgeocoderkey = input("Input here: ").lower()
logger.debug("enablecustomgeocoderkey: %s" % enablecustomgeocoderkey)
if enablecustomgeocoderkey == "yes":
print("", "(38/42)", "To sign up for a Google Maps API key, please visit this link: ",
"https://developers.google.com/maps/documentation/javascript/get-api-key",
"Press the button 'Get Key', and wait a minute. Copy and paste the key into the input",
"below. Your API key will NOT be validated. Enter 'exit' to exit this process, and to disable",
"a custom API key.", sep="\n")
customgeocoderkey = input("Input here: ")
logger.debug("customgeocoderkey: %s" % customgeocoderkey)
while True:
print("", "The API key you entered is: %s" % customgeocoderkey,
"Is this the API key you want to use? Yes or No.", sep="\n")
confirmcustomgeocoderkey = input("Input here: ").lower()
logger.debug("confirmcustomgeocoderkey: %s" % confirmcustomgeocoderkey)
if confirmcustomgeocoderkey == "yes":
break
else:
if confirmcustomgeocoderkey != "no":
print("Couldn't understand your input. Please input your API key again.")
print("Please enter the API key you want to use below.")
customgeocoderkey = input("Input here: ")
logger.debug("customgeocoderkey: %s" % customgeocoderkey)
if customgeocoderkey == "exit":
print("Exiting the custom geocoder key process, and disabling a custom geocoder key.")
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
else:
config['GEOCODER API']['customkey_enabled'] = 'True'
config['GEOCODER API']['customkey'] = str(customgeocoderkey)
logger.debug("GEOCODER API/customkey_enabled is now TRUE.")
print("Changes saved.")
elif enablecustomgeocoderkey == "no":
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['GEOCODER API']['customkey_enabled'] = 'False'
logger.debug("GEOCODER API/customkey_enabled is now FALSE.")
print("Changes saved.")
print("", "(39/42)", "On the summary screen, you can now view a summary of the weather that occurred yesterday.",
"Enabling this will also enable the option to prefetch yesterday's weather at boot in the config file.",
"Please note that enabling this uses 1 extra API call at boot, and will increase PyWeather's loading time.",
"Would you like to turn on showing yesterday's weather on the summary screen? Yes or No. By default, this is",
"disabled.", sep="\n")
showyesterdayonsummary = input("Input here: ").lower()
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
if showyesterdayonsummary == "yes":
config['SUMMARY']['showyesterdayonsummary'] = 'True'
logger.info("SUMMARY/showyesterdayonsummary is now 'True'.")
config['PREFETCH']['yesterdaydata_atboot'] = 'True'
logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.")
showyesterdayonsummary = True
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
elif showyesterdayonsummary == "no":
config['SUMMARY']['showyesterdayonsummary'] = 'False'
logger.info("SUMMARY/showyesterdayonsummary is now 'False'.")
showyesterdayonsummary = False
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['SUMMARY']['showyesterdayonsummary'] = 'False'
logger.info("SUMMARY/showyesterdayonsumary is now 'False'.")
showyesterdayonsummary = False
logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary)
print("Changes saved.")
if showyesterdayonsummary is False:
print("", "(40/42)", "When PyWeather boots up, you can have the option to have yesterday's weather data",
"prefetched during bootup. Enabling this will use 1 extra API call at boot, and will increase PyWeather's",
"loading time. Would you like to enable prefetching yesterday's weather data on boot? Yes or No.",
"By default, this is disabled.", sep="\n")
prefetchyesterdayatboot = input("Input here: ").lower()
logger.debug("prefetchyesterdayatboot: %s" % prefetchyesterdayatboot)
if prefetchyesterdayatboot == "yes":
config['PREFETCH']['yesterdaydata_atboot'] = 'True'
logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.")
print("Changes saved.")
elif prefetchyesterdayatboot == "no":
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.")
print("Changes saved.")
else:
print("Your input could not be understood. Defaulting to 'False'.")
config['PREFETCH']['yesterdaydata_atboot'] = 'False'
logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.")
print("Changes saved.")
print("", "(41/42)", "In 0.6.3 beta and newer, you have the option to enable extra tools for PyWeather.",
"Extra tools are diagnostic tools, and so far you can see cache timings in PyWeather, and more extra tools",
"will be added as time goes on. Would you like to enable the ability to use extra tools? Yes or No. By default",
"this is disabled.", sep="\n")
enableextratools = input("Input here: ").lower()
logger.debug("enableextratools: %s" % enableextratools)
if enableextratools == "yes":
config['UI']['extratools_enabled'] = 'True'
logger.info("UI/extratools_enabled is now 'True'.")
print("Changes saved.")
elif enableextratools == "no":
config['UI']['extratools_enabled'] = 'False'
logger.info("UI/extratools_enabled is now 'False'.")
print("Changes saved.")
else:
print("Could not understand your input. Defaulting to 'False'.")
config['UI']['extratools_enabled'] = 'False'
logger.info("UI/extratools_enabled is now 'False'.")
print("Changes saved.")
print("", "(42/42)", "PyWeather's geocoder usually uses https, but issues have been discovered",
"on some platforms, where the geocoder cannot operate in the https mode. If you press enter",
"PyWeather will automatically detect which scheme to use. If you are an advanced user, and want",
"to configure the scheme yourself, enter advancedconfig at the prompt below.", sep="\n")
configuregeopyscheme = input("Input here: ").lower()
logger.debug("configuregeopyscheme: %s" % configuregeopyscheme)
if configuregeopyscheme == "advancedconfig":
print("Which geopy scheme would you like to use? 'https' works on most platforms",
"but 'http' is needed on some platforms (OS X, as an example). Please input",
"'https' or 'http' below.")
geopyschemetype = input("Input here: ").lower()
logger.debug("geopyschemetype: %s" % geopyschemetype)
if geopyschemetype == "https":
            config['GEOCODER']['scheme'] = 'https'
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved. Geocoder settings will not be validated.")
elif geopyschemetype == "http":
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved. Geocoder settings will not be validated.")
else:
print("Your input could not be understood. Defaulting to 'https'.")
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved. Geocoder settings will not be validated.")
else:
print("Now automatically configuring your geopy scheme.")
# HTTPS validation
from geopy import GoogleV3
geocoder = GoogleV3(scheme='https')
# I've found that one "warm up request", and then waiting ~15 seconds somehow helps determine if a platform is HTTP/HTTPS compatible.
try:
geocoder.geocode("123 5th Avenue, New York, NY")
except:
logger.debug("Warm up geocode failed.")
print("I've just completed a warm-up geocode. However, sometimes a rate limit will",
"occur after this geocode. I've paused the setup process for 10 seconds. This",
"should help with figuring out what scheme works on your OS.", sep="\n")
time.sleep(10)
try:
geocoder.geocode("123 5th Avenue, New York, NY")
print("The geocoder can operate with HTTPS enabled on your OS. Saving these changes...")
config['GEOCODER']['scheme'] = 'https'
logger.debug("GEOCODER/scheme is now 'https'")
print("Changes saved.")
except geopy.exc.GeocoderServiceError:
print("Geopy probably can't run without HTTPS (or your internet went down). Trying HTTP as the scheme...")
geocoder = GoogleV3(scheme='http')
print("Waiting 10 seconds to avoid rate limiting after the previous geocode...")
time.sleep(10)
try:
geocoder.geocode("123 5th Avenue, New York, NY")
print("The geocoder can operate, but without HTTPS enabled on your OS. Saving these changes...")
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved.")
except geopy.exc.GeocoderServiceError:
print("You probably don't have an internet connection, as HTTPS and HTTP validation both failed.",
"Defaulting to HTTP as the geopy scheme...", sep="\n")
config['GEOCODER']['scheme'] = 'http'
logger.debug("GEOCODER/scheme is now 'http'")
print("Changes saved.")
    # Note: if showing yesterday's weather on the summary is enabled, prefetching
    # yesterday's data at boot is enabled along with it; otherwise the user is
    # asked about prefetching separately in step (40/42) above.
print("","That's it! Now commiting config changes...", sep="\n")
try:
with open('storage//config.ini', 'w') as configfile:
logger.debug("configfile: %s" % configfile)
config.write(configfile)
print("Changes committed!")
logger.info("Performed operation: config.write(configfile)")
except:
print("The config file couldn't be written to.",
"Make sure the config file can be written to.", sep="\n")
printException()
print("Press enter to exit.")
input()
sys.exit()
print("","Everything is set up and ready to rumble!",
"Enjoy using PyWeather! If you have any issues, please report them on GitHub!",
"Press enter to continue.", sep="\n")
input()
sys.exit()
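    # Illustrative only (added note, not part of the original wizard): after the
    # wizard finishes, storage/config.ini holds the sections written above. The
    # exact contents depend on your answers; a fragment might look like the
    # following (the values shown are assumptions, not shipped defaults):
    #
    #   [CACHE]
    #   yesterday_cachedtime = 720.0
    #
    #   [UI]
    #   alerts_euiterations = 2
    #   show_completediterations = False
    #
    #   [RADAR GUI]
    #   radar_imagesize = normal
    #
    #   [GEOCODER]
    #   scheme = https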
| gpl-3.0 | 5,827,768,068,210,387,000 | 47.070297 | 167 | 0.616019 | false |
lambdaq/pytr | core.py | 1 | 7912 | #!/usr/bin/env python
# coding: utf8
# from gevent import monkey
# monkey.patch_all()
import socket
import os, sys
import random, struct
import logging
from collections import deque, Counter, defaultdict
logger = logging.getLogger(__file__)
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.setLevel(logging.ERROR)
class UdpIpParser(object):
"""parse IP+UDP"""
def __init__(self, data):
self.data = data
self.ip_hdrl = ip_hdrl = ((data[0]) & 0x0F) * 4
self.udp_payload_len = struct.unpack(
'!H',
data[ip_hdrl + 4:ip_hdrl + 6])[0]
@property
def payload(self):
udp_hdrl = 8
return self.data[self.ip_hdrl + udp_hdrl:self.ip_hdrl + self.udp_payload_len]
class IpPacket(object):
def __init__(self, data):
self.data = data
self.hdrl = (0x0F & (data[0])) * 4
self.payload = self.data[self.hdrl:]
self.ttl = self.data[8]
@property
def src_ip(self):
return socket.inet_ntoa(str(self.data[12:16]))
@property
def dst_ip(self):
return socket.inet_ntoa(str(self.data[16:20]))
class IcmpParser(object):
hdrl = 8
def __init__(self, data):
self.data = data
@property
def type(self):
return self.data[0]
@property
def payload(self):
return self.data[8:14]
@property
def id(self):
return struct.unpack('>H', self.data[4:6])[0]
def checksum(msg):
# simplest rfc1071. msg is bytearray
s = 0
for i in range(0, len(msg), 2):
w = msg[i] + (msg[i + 1] << 8)
c = s + w
s = (c & 0xffff) + (c >> 16)
return ~s & 0xffff
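# Illustrative check (added, not in the original module): for an even-length
# buffer, as used everywhere in this file, the result is the one's complement of
# the folded 16-bit little-endian word sum, e.g.
#   checksum(bytearray(b'\x08\x00\x00\x00')) == 0xfff7   # i.e. ~0x0008 & 0xffff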
def create_ping(id=None):
id = id or random.randint(30000, 65500)
icmp_type = 8
icmp_code = 0
icmp_checksum = 0
icmp_seq = 1
icmp_timestamp = 0
data = '%06d' % id
s = struct.Struct('!bbHHhQ%ss' % len(data))
msg = bytearray(s.size)
s.pack_into(
msg, 0,
icmp_type, icmp_code, icmp_checksum, id,
icmp_seq, icmp_timestamp, data)
# calculate ICMP checksum, which can not be offloaded
cs = checksum(msg)
struct.pack_into('<H', msg, 2, cs)
return msg
def guess_hop(ttl):
if not ttl:
return
if ttl >= 128:
return 256 - ttl
elif 64 < ttl < 128:
return 128 - ttl
else:
return 64 - ttl
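# Rough examples (illustrative): the guess assumes the common initial TTLs of
# 64, 128 or ~255, so guess_hop(52) == 12, guess_hop(118) == 10 and
# guess_hop(245) == 11 hops away; a TTL of 0/None yields None.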
MAX_RETRY = 5
class Tracer(object):
MAX_TTL = 32
def __init__(self):
"""
packet send rate = self.batch_size/self.timeout
- hosts is iterable target IPs
"""
self.batch_size = 100
self.max_retry = 10
self.timeout = 1
self.running = self.timeout * self.max_retry
self.max_ttl = defaultdict(lambda: self.MAX_TTL)
self.echo_map = {}
self.in_flight = deque(maxlen=self.batch_size) # a list of ip-ttl tuples
self.retries = Counter() # remaining retries
self.result = defaultdict(dict) # {ip: [hop1, hop2, ...]}
self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
self.sock.bind(('', 0))
self.sock.settimeout(self.timeout)
def _iter_ip_and_ttl(self, hosts):
"""generate all IPs and their hops need to ping
Need consider retries.
"""
for ip in hosts:
for ttl in xrange(1, self.MAX_TTL + 1):
if ttl >= self.max_ttl[ip]:
break
resp = (ip.strip(), ttl)
self.in_flight.append(resp)
yield resp
def run(self, hosts):
"""would block"""
self.ip_and_ttl = self._iter_ip_and_ttl(hosts)
self.tick()
while self.running > 0:
data = bytearray(1024)
try:
nbytes, addr = self.sock.recvfrom_into(data)
self.on_data(data, addr[0])
except socket.timeout:
self.tick()
return self.result
def _iter_retry(self):
i = 0
while self.in_flight and self.retries:
if not i < len(self.in_flight):
return
key = self.in_flight[i]
if self.retries[key] > 0:
self.retries[key] -= 1
yield key
i += 1
if self.retries[key] <= 0:
self.on_retry_fail(*key)
i -= 1
def on_retry_fail(self, ip, ttl):
self.retries.pop((ip, ttl), None)
self.in_flight.remove((ip, ttl))
if ttl <= self.max_ttl[ip]:
self.result[ip][ttl] = '?'
@property
def on_tick(self):
return getattr(self, '_on_tick', None) or (lambda *args: None)
@on_tick.setter
def on_tick(self, func):
self._on_tick = func
@property
def on_pong(self):
return getattr(self, '_on_pong', None) or (lambda *args: None)
@on_pong.setter
def on_pong(self, func):
self._on_pong = func
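    # Both hooks default to no-ops; a caller could, for example (illustrative),
    # attach:
    #   tracer.on_pong = lambda tracer, ping_ip, pong_ip, ttl: \
    #       logger.info("%s hop %s -> %s", ping_ip, ttl, pong_ip)
    # on_tick(tracer) fires once per send/timeout cycle, on_pong once per reply.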
def tick(self):
logger.debug('in_flight=%s, retries=%s', len(self.in_flight), self.retries.most_common(4))
self.on_tick(self)
sent = 0
for ip, ttl in self._iter_retry():
self.ping(ip, ttl)
sent += 1
if sent >= self.batch_size:
break
while sent < self.batch_size:
try:
ip, ttl = self.ip_and_ttl.next()
except StopIteration:
self.running -= self.timeout
return
self.ping(ip, ttl)
self.retries[(ip, ttl)] = self.max_retry
sent += 1
def ping(self, ip, ttl):
logger.debug("Ping %s, ttl=%s", ip, ttl)
key = (ip, ttl)
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
sock.bind(('', 0))
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
icmp_id = random.randint(30000, 60000)
self.echo_map[icmp_id] = (ip, ttl)
packet = create_ping(icmp_id)
sock.sendto(packet, (ip, 0))
sock.close()
return icmp_id
def pong(self, ping_ip, pong_ip, ttl):
# @ToDo: handle multi-path trace-route
if ping_ip == pong_ip:
ttl = min(ttl, self.max_ttl[ping_ip])
self.max_ttl[ping_ip] = ttl
for k in xrange(1, self.MAX_TTL):
ip = self.result[ping_ip].get(k)
if k > ttl or ip == ping_ip:
self.result[ping_ip].pop(k, None)
key = ping_ip, ttl
try:
self.in_flight.remove(key)
except ValueError:
pass
self.retries.pop(key, None)
else:
key = ping_ip, ttl
try:
self.in_flight.remove(key)
except ValueError:
pass
self.retries.pop(key, None)
self.result[ping_ip][ttl] = pong_ip
self.on_pong(self, ping_ip, pong_ip, ttl)
def on_data(self, data, addr):
# get IP packet inside returned IP
outer_ip = IpPacket(data)
inner_ip = IpPacket(outer_ip.payload[IcmpParser.hdrl:])
# the raw structure is: IP(ICMP(IP(ICMP)))
icmp = IcmpParser(inner_ip.payload)
icmp_id = None
if icmp.payload.isdigit():
icmp_id = int(icmp.payload)
if not icmp_id:
icmp_id = icmp.id
if icmp_id in self.echo_map:
ip, ttl = self.echo_map[icmp_id]
logger.debug('Pong %s, ip=%s, hop=%s', ip, addr, ttl)
# f.write('%s\t%s\t%s\n' % (ip, ttl, addr))
self.pong(ip, addr, ttl)
else:
logger.debug('Pong unknown %s -> %s type %s' % (
inner_ip.src_ip, inner_ip.dst_ip, icmp.type))
def get_hops(res):
return [res.get(i) or '?' for i in xrange(max(res.keys()), 0, -1)]
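# Minimal usage sketch (illustrative, not part of the original module; raw ICMP
# sockets normally require root privileges):
#   tracer = Tracer()
#   result = tracer.run(['8.8.8.8'])      # {ip: {ttl: hop_ip or '?'}}
#   print get_hops(result['8.8.8.8'])     # hops ordered from farthest to nearest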
| bsd-2-clause | -1,979,069,231,076,845,000 | 27.056738 | 98 | 0.523129 | false |
waxkinetic/fabcloudkit | fabcloudkit/build_tools/python_build.py | 1 | 7221 | """
fabcloudkit
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# pypi
from fabric.context_managers import cd, prefix, settings
from fabric.operations import run, sudo
from fabric.state import env
# package
from fabcloudkit import ctx
from ..build import build_repo, BuildInfo
from ..internal import *
from ..toolbase import Tool
from ..tool.virtualenv import VirtualEnvTool
from ..util import copy_file_from
class PythonBuildTool(Tool):
def build(self, repos, reference_repo=None, post_build=None, interpreter=None, tarball=False, unittest=None):
"""Performs a 'python' build.
Performs a python build by running setup.py in each identified repo. If desired, repos can
be refreshed first (e.g., via git pull).
:param repos:
specifies the list of repos in which to run setup.py.
:param reference_repo:
optional; the reference repo from which to retrieve the head commit id.
            this id is used as a component of the build name. if not specified, the
first repo in the context is used.
:param post_build:
a list of post-build commands. a list of dictionaries. each dict must
contain the key "command" that specifies the command to execute. optionally,
it may include a "sudo" value of [True|False], and an "ignore_fail" value
of [True|False].
:param interpreter:
specifies the Python interpreter to use in the build's virtualenv. if
not specified, the operating system default interpreter is used. note
that the interpreter must already exist on the system.
:param tarball:
True to create a tarball of the build; this is required if any other
instance will use "copy_from".
:param unittest:
            currently unused; unit-test execution is not implemented yet (see _unittest).
:return:
            self; the new build name is exposed to the caller via
            env.role.set_env(build_result=...)
"""
start_msg('Executing build for instance in role "{0}":'.format(env.role_name))
# increment the build name and create a new virtualenv for the build.
build_name = self._increment_name(reference_repo)
build_env_dir = ctx().build_path(build_name)
VirtualEnvTool().ensure(build_env_dir, interpreter)
# run "setup.py install" in each repo.
for repo_name in ([repos] if isinstance(repos, basestring) else repos):
build_repo(build_env_dir, ctx().get_repo(repo_name))
# run tests.
self._unittest(unittest, build_name)
# save the last known good build-name.
BuildInfo.set_last_good(build_name)
if tarball:
self._tarball(build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, build_name)
# make the build_name available to the caller; it'll be set as an instance-tag.
succeed_msg('Build completed successfully for role "{0}".'.format(env.role_name))
env.role.set_env(build_result=build_name)
return self
def copy_from(self, role_name, post_build=None, delete_tar=True):
"""Copies an existing build from an instance in the specified role.
Instead of building itself, a build is copied from another instance to the current
instance.
:param role_name: the role of the instance to copy the build tarball from.
:param post_build: list of post-build commands to execute.
:param delete_tar: True to delete the tarball, False otherwise.
:return: the name of the copied build.
"""
# get the last known good build from the source machine.
# note: we could alternatively get this from an instance tag.
message('Copying build from instance in role: "{0}"'.format(role_name))
inst, role = ctx().get_host_in_role(role_name)
with settings(host_string=inst.public_dns_name, user=role.user):
message('Getting last good build-name from: "{0}"'.format(role_name))
src_build_name = BuildInfo().get_last_good()
# copy it from the source machine. note that all machines must have been provisioned
# properly to allow the current machine access to the source machine.
tarball = self._tarball_name(src_build_name)
path = ctx().build_path(tarball)
copy_file_from(role.user, inst.private_dns_name, path, path)
with cd(ctx().builds_root()):
# untar it.
command = 'tar -x --file={tarball}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to untar: "{0}"'.format(path))
# delete the tar.
if delete_tar:
run('rm {tarball}'.format(**locals()))
# update the build information.
BuildInfo().set_last_good(src_build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, src_build_name)
succeed_msg('Successfully copied build: "{0}"'.format(src_build_name))
return src_build_name
def _execute_post_build(self, cmd_lst, build_name):
message('Running post-build commands:')
with prefix(VirtualEnvTool.activate_prefix(ctx().build_path(build_name))):
for desc in cmd_lst:
f = sudo if desc.get('sudo', False) else run
result = f(desc['command'])
if result.failed and not desc.get('ignore_fail', False):
raise HaltError('Post-build command failed: "{0}"'.format(desc['command']))
message('Completed post-build commands.')
return self
def _increment_name(self, ref_repo_name):
# some projects have more than one repo. in this case one is designated as the "reference".
# the reference repo gives it's most recent commit ID that's used in the new build name.
# if no reference is given, just use the first (hopefully, the only) repo in the Context.
if ref_repo_name:
ref_repo = ctx().get_repo(ref_repo_name)
else:
ref_repo = ctx().repos()[0]
name = BuildInfo.next(ref_repo.dir)
succeed_msg('Created new build name: "{0}"'.format(name))
return name
def _tarball(self, build_name):
tarball = self._tarball_name(build_name)
dir_to_tar = ctx().build_path(build_name)
with cd(ctx().builds_root()):
options = '--create --gzip --format=ustar --owner=0 --group=0'
command = 'tar {options} --file={tarball} {build_name}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to create tarball for: "{0}"'.format(dir_to_tar))
succeed_msg('Created build tarball: "{0}"'.format(tarball))
return self
def _tarball_name(self, build_name):
return '{build_name}.tar.gz'.format(**locals())
def _unittest(self, plan, build_name):
failed_msg('The action "unittest" is not implemented (yet).')
return self
# register.
Tool.__tools__['python_build'] = PythonBuildTool
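# Illustrative sketch (assumptions based on this module only, not on fabcloudkit
# docs): once registered, a deployment plan would look the tool up and run e.g.
#   PythonBuildTool().build(repos=['myapp'], tarball=True,
#                           post_build=[{'command': 'pip freeze', 'sudo': False}])
# or, on a follower instance, PythonBuildTool().copy_from('builder-role');
# 'myapp' and 'builder-role' are hypothetical names.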
| bsd-3-clause | 4,460,363,286,732,108,300 | 39.116667 | 113 | 0.62526 | false |
ajstarna/RicochetRobots | Brobot/model.py | 1 | 9336 | import itertools
import random
# Directions
NORTH = 'N'
EAST = 'E'
SOUTH = 'S'
WEST = 'W'
DIRECTIONS = [NORTH, EAST, SOUTH, WEST]
REVERSE = {
NORTH: SOUTH,
EAST: WEST,
SOUTH: NORTH,
WEST: EAST,
}
OFFSET = {
NORTH: -16,
EAST: 1,
SOUTH: 16,
WEST: -1,
}
# Masks
M_NORTH = 0x01
M_EAST = 0x02
M_SOUTH = 0x04
M_WEST = 0x08
M_ROBOT = 0x10
M_LOOKUP = {
NORTH: M_NORTH,
EAST: M_EAST,
SOUTH: M_SOUTH,
WEST: M_WEST,
}
# Colors
RED = 'R'
GREEN = 'G'
BLUE = 'B'
YELLOW = 'Y'
COLORS = [RED, GREEN, BLUE, YELLOW]
# Shapes
CIRCLE = 'C'
TRIANGLE = 'T'
SQUARE = 'Q'
HEXAGON = 'H'
SHAPES = [CIRCLE, TRIANGLE, SQUARE, HEXAGON]
# Tokens
TOKENS = [''.join(token) for token in itertools.product(COLORS, SHAPES)]
# Quadrants
QUAD_1A = (
'NW,N,N,N,NE,NW,N,N,'
'W,S,X,X,X,X,SEYH,W,'
'WE,NWGT,X,X,X,X,N,X,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,X,S,X,'
'SW,X,X,X,X,X,NEBQ,W,'
'NW,X,E,SWRC,X,X,X,S,'
'W,X,X,N,X,X,E,NW'
)
QUAD_1B = (
'NW,NE,NW,N,NS,N,N,N,'
'W,S,X,E,NWRC,X,X,X,'
'W,NEGT,W,X,X,X,X,X,'
'W,X,X,X,X,X,SEYH,W,'
'W,X,X,X,X,X,N,X,'
'SW,X,X,X,X,X,X,X,'
'NW,X,E,SWBQ,X,X,X,S,'
'W,X,X,N,X,X,E,NW'
)
QUAD_2A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,E,SWBC,X,'
'W,S,X,X,X,X,N,X,'
'W,NEYT,W,X,X,S,X,X,'
'W,X,X,X,E,NWGQ,X,X,'
'W,X,SERH,W,X,X,X,X,'
'SW,X,N,X,X,X,X,S,'
'NW,X,X,X,X,X,E,NW'
)
QUAD_2B = (
'NW,N,N,N,NE,NW,N,N,'
'W,X,SERH,W,X,X,X,X,'
'W,X,N,X,X,X,X,X,'
'WE,SWGQ,X,X,X,X,S,X,'
'SW,N,X,X,X,E,NWYT,X,'
'NW,X,X,X,X,S,X,X,'
'W,X,X,X,X,NEBC,W,S,'
'W,X,X,X,X,X,E,NW'
)
QUAD_3A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,SEGH,W,X,'
'WE,SWRQ,X,X,X,N,X,X,'
'SW,N,X,X,X,X,S,X,'
'NW,X,X,X,X,E,NWYC,X,'
'W,X,S,X,X,X,X,X,'
'W,X,NEBT,W,X,X,X,S,'
'W,X,X,X,X,X,E,NW'
)
QUAD_3B = (
'NW,N,NS,N,NE,NW,N,N,'
'W,E,NWYC,X,X,X,X,X,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,E,SWBT,X,'
'SW,X,X,X,S,X,N,X,'
'NW,X,X,X,NERQ,W,X,X,'
'W,SEGH,W,X,X,X,X,S,'
'W,N,X,X,X,X,E,NW'
)
QUAD_4A = (
'NW,N,N,NE,NW,N,N,N,'
'W,X,X,X,X,X,X,X,'
'W,X,X,X,X,SEBH,W,X,'
'W,X,S,X,X,N,X,X,'
'SW,X,NEGC,W,X,X,X,X,'
'NW,S,X,X,X,X,E,SWRT,'
'WE,NWYQ,X,X,X,X,X,NS,'
'W,X,X,X,X,X,E,NW'
)
QUAD_4B = (
'NW,N,N,NE,NW,N,N,N,'
'WE,SWRT,X,X,X,X,S,X,'
'W,N,X,X,X,X,NEGC,W,'
'W,X,X,X,X,X,X,X,'
'W,X,SEBH,W,X,X,X,S,'
'SW,X,N,X,X,X,E,NWYQ,'
'NW,X,X,X,X,X,X,S,'
'W,X,X,X,X,X,E,NW'
)
QUADS = [
(QUAD_1A, QUAD_1B),
(QUAD_2A, QUAD_2B),
(QUAD_3A, QUAD_3B),
(QUAD_4A, QUAD_4B),
]
# Rotation
ROTATE_QUAD = [
56, 48, 40, 32, 24, 16, 8, 0,
57, 49, 41, 33, 25, 17, 9, 1,
58, 50, 42, 34, 26, 18, 10, 2,
59, 51, 43, 35, 27, 19, 11, 3,
60, 52, 44, 36, 28, 20, 12, 4,
61, 53, 45, 37, 29, 21, 13, 5,
62, 54, 46, 38, 30, 22, 14, 6,
63, 55, 47, 39, 31, 23, 15, 7,
]
ROTATE_WALL = {
NORTH: EAST,
EAST: SOUTH,
SOUTH: WEST,
WEST: NORTH,
}
# Helper Functions
def idx(x, y, size=16):
return y * size + x
def xy(index, size=16):
x = index % size
y = index / size
return (x, y)
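# Illustrative round trip on the 16x16 board: idx(3, 2) == 2 * 16 + 3 == 35 and
# xy(35) == (3, 2); the y coordinate relies on Python 2 integer division.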
def rotate_quad(data, times=1):
for i in range(times):
result = [data[index] for index in ROTATE_QUAD]
result = [''.join(ROTATE_WALL.get(c, c) for c in x) for x in result]
data = result
return data
def create_grid(quads=None):
if quads is None:
quads = [random.choice(pair) for pair in QUADS]
random.shuffle(quads)
quads = [quad.split(',') for quad in quads]
quads = [rotate_quad(quads[i], i) for i in [0, 1, 3, 2]]
result = [None for i in range(16 * 16)]
for i, quad in enumerate(quads):
dx, dy = xy(i, 2)
for j, data in enumerate(quad):
x, y = xy(j, 8)
x += dx * 8
y += dy * 8
index = idx(x, y)
result[index] = data
return result
def to_mask(cell):
result = 0
for letter, mask in M_LOOKUP.items():
if letter in cell:
result |= mask
return result
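# For example (illustrative): to_mask('NW') == M_NORTH | M_WEST == 0x09, while a
# token cell such as 'SEYH' gives M_SOUTH | M_EAST == 0x06 (token letters are
# ignored because only N/E/S/W appear in M_LOOKUP).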
# Game
class Game(object):
@staticmethod
def hardest():
quads = [QUAD_2B, QUAD_4B, QUAD_3B, QUAD_1B]
robots = [226, 48, 43, 18]
token = 'BT'
return Game(quads=quads, robots=robots, token=token)
def __init__(self, seed=None, quads=None, robots=None, token=None):
if seed:
random.seed(seed)
self.grid = create_grid(quads)
if robots is None:
self.robots = self.place_robots()
else:
self.robots = dict(zip(COLORS, robots))
self.token = token or random.choice(TOKENS)
self.moves = 0
self.last = None
def place_robots(self):
result = {}
used = set()
for color in COLORS:
while True:
index = random.randint(0, 255)
if index in (119, 120, 135, 136):
continue
if self.grid[index][-2:] in TOKENS:
continue
if index in used:
continue
result[color] = index
used.add(index)
break
return result
def get_robot(self, index):
for color, position in self.robots.iteritems():
if position == index:
return color
return None
def can_move(self, color, direction):
if self.last == (color, REVERSE[direction]):
return False
index = self.robots[color]
if direction in self.grid[index]:
return False
new_index = index + OFFSET[direction]
if new_index in self.robots.itervalues():
return False
return True
def compute_move(self, color, direction):
index = self.robots[color]
robots = self.robots.values()
while True:
if direction in self.grid[index]:
break
new_index = index + OFFSET[direction]
if new_index in robots:
break
index = new_index
return index
def do_move(self, color, direction):
start = self.robots[color]
last = self.last
if last == (color, REVERSE[direction]):
print 'reverse'
#raise Exception
end = self.compute_move(color, direction)
if start == end:
print 'wall move'
#raise Exception
self.moves += 1
self.robots[color] = end
self.last = (color, direction)
return (color, start, last)
def undo_move(self, data):
color, start, last = data
self.moves -= 1
self.robots[color] = start
self.last = last
def get_moves(self, colors=None):
result = []
colors = colors or COLORS
for color in colors:
for direction in DIRECTIONS:
if self.can_move(color, direction):
result.append((color, direction))
return result
def over(self):
color = self.token[0]
return self.token in self.grid[self.robots[color]]
def key(self):
return tuple(self.robots.itervalues())
def search(self):
max_depth = 1
while True:
#print 'Searching to depth:', max_depth
result = self._search([], set(), 0, max_depth)
if result is not None:
return result
max_depth += 1
def _search(self, path, memo, depth, max_depth):
if self.over():
return list(path)
if depth == max_depth:
return None
key = (depth, self.key())
if key in memo:
return None
memo.add(key)
if depth == max_depth - 1:
colors = [self.token[0]]
else:
colors = None
moves = self.get_moves(colors)
for move in moves:
data = self.do_move(*move)
path.append(move)
result = self._search(path, memo, depth + 1, max_depth)
path.pop(-1)
self.undo_move(data)
if result:
return result
return None
def export(self):
grid = []
token = None
robots = [self.robots[color] for color in COLORS]
for index, cell in enumerate(self.grid):
mask = to_mask(cell)
if index in robots:
mask |= M_ROBOT
grid.append(mask)
if self.token in cell:
token = index
robot = COLORS.index(self.token[0])
return {
'grid': grid,
'robot': robot,
'token': token,
'robots': robots,
}
def export2(self):
grid = []
token = None
robots = [self.robots[color] for color in COLORS]
for index, cell in enumerate(self.grid):
mask = to_mask(cell)
grid.append(mask)
if self.token in cell:
token = index
robot = COLORS.index(self.token[0])
return {
'grid': grid,
'robot': robot,
'token': token,
'robots': robots,
}
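# Illustrative usage sketch (not part of the original module): solve the bundled
# "hardest" board and print the move list, e.g.
#   game = Game.hardest()
#   print game.search()   # list of (color, direction) moves reaching the token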
| bsd-2-clause | -6,690,397,051,170,826,000 | 23.439791 | 76 | 0.487575 | false |
wolfgangmauerer/prosoda | prosoda/interactive.py | 1 | 1232 | # Commands that are useful after adist.yp has been
# run in ipython
# This file is part of prosoda. prosoda is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2010, 2011, 2012 by Wolfgang Mauerer <wm@linux-kernel.net>
# All Rights Reserved.
initialiseR()
git = shelve.open("/home/wolfgang/linux-14-33")["git"]
res = createSeries(git, "__main__", ["v2.6.24", "v2.6.25"])
writeToFile(res, "/home/wolfgang/raw.dat")
runR('raw = as.xts(read.zoo(file="/home/wolfgang/raw.dat", FUN=tstamp_to_date))')
runR('reg = to.regts(raw[,1], 250)')
reg = RtoPython(runR('reg'))
raw = RtoPython(runR('raw'))
# ... and then commence with the analysis as desired
| gpl-2.0 | -7,668,178,328,738,901,000 | 41.482759 | 81 | 0.729708 | false |
kdebrab/pandas | pandas/core/indexes/category.py | 1 | 30548 | import operator
import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
from pandas.core.arrays.categorical import Categorical, contains
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
See Also
--------
Categorical, Index
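
    Examples
    --------
    A ``CategoricalIndex`` built from an iterable of values (illustrative):

    >>> idx = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
    >>> idx.categories
    Index(['a', 'b', 'c'], dtype='object')
    >>> idx.ordered
    False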
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False):
if fastpath:
return cls._simple_new(data, name=name, dtype=dtype)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(data, categories, ordered,
dtype)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(data, categories, ordered,
dtype)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(data, categories, ordered,
dtype)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
@classmethod
def _create_categorical(cls, data, categories=None, ordered=None,
dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if (isinstance(data, (cls, ABCSeries)) and
is_categorical_dtype(data)):
data = data.values
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif ordered is not None and ordered != data.ordered:
data = data.set_ordered(ordered)
if isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
dtype=None, **kwargs):
result = object.__new__(cls)
values = cls._create_categorical(values, categories, ordered,
dtype=dtype)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
dtype=None, **kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
# we want to reuse self.dtype if possible, i.e. neither are
# overridden.
if dtype is not None and (categories is not None or
ordered is not None):
raise TypeError("Cannot specify both `dtype` and `categories` "
"or `ordered`")
if categories is None and ordered is None:
dtype = self.dtype if dtype is None else dtype
return super(CategoricalIndex, self)._shallow_copy(
values=values, dtype=dtype, **kwargs)
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex, self)._shallow_copy(
values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
other, dtype=self.dtype))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
        Determines if two CategoricalIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype.name))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def itemsize(self):
# Size of the items in categories, not codes.
return self.values.itemsize
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
def tolist(self):
return self._data.tolist()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.hasnans
return contains(self, key, container=self._engine)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
return key in self
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
# introspection
@cache_readonly
def is_unique(self):
return self._engine.is_unique
@property
def is_monotonic_increasing(self):
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return self._engine.is_monotonic_decreasing
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
        --------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase.ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
from pandas.core.arrays.categorical import _recode_for_categories
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(target.codes,
target.categories,
self.values.categories)
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
        # Return our indexer or raise if any of the values are not included in
        # the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
        TypeError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
        Concatenate to_concat, which must have the same class.
        Raises TypeError if an element is not dtype-compatible
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort, observed):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort, observed)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
opname = '__{op}__'.format(op=op.__name__)
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
other._values, dtype=self.dtype)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisons must "
"have the same categories and ordered "
"attributes")
result = op(self.values, other)
if isinstance(result, ABCSeries):
# Dispatch to pd.Categorical returned NotImplemented
# and we got a Series back; down-cast to ndarray
result = result.values
return result
return compat.set_function_name(_evaluate_compare, opname, cls)
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
| bsd-3-clause | -6,292,839,324,184,888,000 | 34.52093 | 79 | 0.57097 | false |
chop-dbhi/varify-data-warehouse | vdw/genes/models.py | 1 | 4984 | from django.db import models
from django.contrib.auth.models import User
from objectset.models import ObjectSet, SetObject
from vdw.literature.models import PubMed
from vdw.genome.models import Chromosome
from vdw.phenotypes.models import Phenotype, PhenotypeThrough
from .managers import GeneManager
class GeneFamily(models.Model):
"Gene family tags and descriptions."
tag = models.CharField(max_length=30, null=True)
description = models.CharField(max_length=200, null=True)
class Meta(object):
db_table = 'gene_family'
class Synonym(models.Model):
"""Model which contains known alternate gene names and symbols for
the canonical genes. This can be used as an index for search-related
queries.
"""
# Call it a label since this may be a symbol, a name or something else
label = models.CharField(max_length=255, db_index=True)
class Meta(object):
db_table = 'synonym'
class Gene(models.Model):
"""Unified gene model. This includes data from multiple sources with
    the appropriate `id` defined to reference the source. If multiple
    sources overlap, the respective `id`s will be filled in.
    The canonical source is HGNC, which approves gene names and symbols;
    the `approved` flag should be set if this is the gene name and symbol
    approved by HGNC.
"""
chr = models.ForeignKey(Chromosome)
symbol = models.CharField(max_length=255, db_index=True)
name = models.TextField('full name', blank=True)
hgnc_id = models.IntegerField('HGNC ID', null=True, blank=True)
# Via the HGNC documentation: "Families/groups may be either structural or
# functional, therefore a gene may belong to more than one family/group"
families = models.ManyToManyField(GeneFamily, blank=True)
# Literature
articles = models.ManyToManyField(PubMed, db_table='gene_pubmed')
# Synonyms
synonyms = models.ManyToManyField(Synonym, db_table='gene_synonym')
# Phenotypes
phenotypes = models.ManyToManyField(Phenotype, through='GenePhenotype')
objects = GeneManager()
class Meta(object):
db_table = 'gene'
def __unicode__(self):
return self.symbol
def approved(self):
return self.hgnc_id is not None
def hgnc_url(self):
if self.hgnc_id:
return 'http://www.genenames.org/data/hgnc_data.php?hgnc_id=' + \
str(self.hgnc_id)
class GenePhenotype(PhenotypeThrough):
gene = models.ForeignKey(Gene)
class Meta(object):
db_table = 'gene_phenotype'
class Exon(models.Model):
"Gene-specific exon region"
gene = models.ForeignKey(Gene)
index = models.IntegerField('exon index')
start = models.IntegerField('exon start position')
end = models.IntegerField('exon end position')
class Meta(object):
db_table = 'exon'
class Transcript(models.Model):
"Gene transcripts"
refseq_id = models.CharField(max_length=100, unique=True)
strand = models.CharField(max_length=1, null=True, blank=True,
help_text='+ or - for strand')
start = models.IntegerField('transcript start position', null=True,
blank=True)
end = models.IntegerField('transcript end position', null=True, blank=True)
coding_start = models.IntegerField('coding region start position',
null=True, blank=True)
coding_end = models.IntegerField('coding region end position', null=True,
blank=True)
coding_start_status = models.CharField('coding region start status',
max_length=20, null=True,
blank=True)
coding_end_status = models.CharField('coding region end status',
max_length=20, null=True, blank=True)
exon_count = models.IntegerField('number of exons', null=True, blank=True)
gene = models.ForeignKey(Gene, null=True, blank=True)
exons = models.ManyToManyField(Exon, db_table='transcript_exon')
class Meta(object):
db_table = 'transcript'
def ncbi_url(self):
return 'http://www.ncbi.nlm.nih.gov/nuccore/' + self.refseq_id
class GeneSet(ObjectSet):
user = models.ForeignKey(User, null=True, blank=True)
name = models.CharField(max_length=100, null=True, blank=True)
genes = models.ManyToManyField(Gene, through='GeneSetObject')
published = models.BooleanField(default=True)
set_object_rel = 'genes'
label_field = 'name'
def __unicode__(self):
return unicode(self.name)
class Meta(object):
db_table = 'geneset'
ordering = ('user', 'name',)
class GeneSetObject(SetObject):
object_set = models.ForeignKey(GeneSet, db_column='set_id')
set_object = models.ForeignKey(Gene, db_column='object_id')
class Meta(object):
db_table = 'geneset_setobject'
| bsd-2-clause | 2,703,393,772,772,176,400 | 33.372414 | 79 | 0.660112 | false |
numericube/twistranet | twistranet/twistapp/forms/fields.py | 1 | 7275 | """
The twistranet Fields
"""
import os
import urlparse
from django import forms
from django.core.validators import URL_VALIDATOR_USER_AGENT
from django.db import models
from django.core.validators import EMPTY_VALUES
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
from twistranet.twistapp.lib.log import log
import widgets
from validators import URLValidator, ViewPathValidator
class PermissionFormField(forms.ChoiceField):
"""
This overrides the regular ChoiceField to add additional rendering.
"""
widget = widgets.PermissionsWidget
def __init__(
self, choices = (), required=True, widget=None, max_length = None,
label=None, initial=None, help_text=None, to_field_name=None,
*args, **kwargs
):
super(PermissionFormField, self).__init__(choices, required, widget, label, initial, help_text, *args, **kwargs)
# We put this here to avoid import errors
self.default_error_messages = {
'invalid_choice': _(u'Select a valid choice. That choice is not one of'
u' the available choices.'),
}
class PermissionsFormField(forms.ChoiceField):
"""
This overrides the regular ChoiceField to add additional rendering.
"""
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for id, name, description in self.choices:
if value == smart_unicode(id):
return True
return False
class ModelInputField(forms.Field):
"""
This is a field used to enter a foreign key value inside a classic Input widget.
This is used when there are a lot of values to check against (and ModelChoiceField is not
efficient anymore), plus the value is checked against the QuerySet very late in the process.
"""
def __init__(
self, model, filter = None, required=True, widget=None,
label=None, initial=None, help_text=None, to_field_name=None,
*args, **kwargs
):
super(ModelInputField, self).__init__(required, widget, label, initial, help_text,
*args, **kwargs)
self.model = model
self.filter = filter
self.to_field_name = to_field_name
# We put this here to avoid import errors
self.default_error_messages = {
'invalid_choice': _(u'Select a valid choice. That choice is not one of'
u' the available choices.'),
}
def to_python(self, value):
"""
'Resolve' the query set at validation time.
This way, we're sure to have the freshest version of the QS.
"""
if value in EMPTY_VALUES:
return None
try:
key = self.to_field_name or 'pk'
qs = self.model.objects.get_query_set()
if self.filter:
qs = qs.filter(self.filter)
value = qs.get(**{key: value})
        except self.model.DoesNotExist:
raise ValidationError(self.error_messages['invalid_choice'])
return value
class ResourceFormField(forms.MultiValueField):
"""
The ResourceFormField is a resource browser.
You can pass it a few parameters:
- model which is the subclass you want to read your resources from (default: twistranet.Resource).
Useful if you want to display only images for example.
- filter which will be passed to model.objects.filter() call before rendering the widget.
These model / filter params are the only solution to handle choices WITH the security model.
- allow_upload (upload is ok)
- allow_select (can select an existing resource from the given filter)
"""
widget = widgets.ResourceWidget
field = ModelInputField
model = None
filter = None
def __init__(self, *args, **kwargs):
# Initial values
from twistranet.twistapp.models import Resource
self.model = kwargs.pop("model", Resource)
self.filter = kwargs.pop("filter", None)
self.allow_upload = kwargs.pop("allow_upload", True)
self.allow_select = kwargs.pop("allow_select", True)
self.display_renderer = kwargs.pop("display_renderer", True)
self.media_type = kwargs.pop("media_type", 'file')
self.widget = kwargs.pop("widget", self.widget(
model = self.model, filter = self.filter,
allow_upload = self.allow_upload,
allow_select = self.allow_select,
display_renderer = self.display_renderer,
media_type = self.media_type
))
self.required = kwargs.pop("required", True)
# The fields we'll use:
# - A ModelInputField used to handle the ForeignKey.
# - A FileField used to handle data upload.
fields = []
field0 = self.field(model = self.model, filter = self.filter, required = self.required)
# no more used
# field1 = forms.FileField(required = False)
dummy = forms.CharField(required = False)
if self.allow_select or self.allow_upload:
fields.append(field0)
else:
fields.append(dummy)
# # Compatibility with form_for_instance
# if kwargs.get('initial'):
# initial = kwargs['initial']
# else:
# initial = None
# self.widget = self.widget(initial=initial)
super(ResourceFormField, self).__init__(fields, label = kwargs.pop('label'), required = False) #self.required)
def prepare_value(self, value):
"""
Pass the query_set to the underlying widget, so that it's computed as late as possible.
"""
qs = self.model.objects.get_query_set()
if self.filter:
qs = qs.filter(self.filter)
self.widget.query_set = qs
return super(ResourceFormField, self).prepare_value(value)
def compress(self, data_list):
return data_list
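# Example declaration (a minimal sketch; the surrounding form and the Q()
# filter expression are assumptions for illustration, not part of twistranet):
#
#   from django.db.models import Q
#
#   class IllustrationForm(forms.Form):
#       picture = ResourceFormField(
#           label="Picture",
#           filter=Q(publisher__slug="administrators"),
#           allow_upload=True,
#           allow_select=True,
#       )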
# URLField which also accept relative urls
class LargeURLField(forms.CharField):
"""
A URL field which accepts internal link
and intranet links (without a standard domain)
"""
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(LargeURLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
value = urlparse.urlunparse(urlparse.urlparse(value))
return super(LargeURLField, self).to_python(value)
class ViewPathField(forms.CharField):
"""
View Path field (could be improved)
"""
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ViewPathField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(ViewPathValidator())
self.default_error_messages = { 'invalid': _(u'Enter a valid Path.'),}
| agpl-3.0 | 4,674,477,367,613,389,000 | 37.492063 | 120 | 0.619794 | false |
PhyloStar/PyBayes | params_moves.py | 1 | 1027 | import numpy as np
from scipy.stats import dirichlet
import random, math
dir_alpha = 100.0
scaler_alpha = 1.25
epsilon = 1e-10
def mvDirichlet(pi):
pi_new = dirichlet.rvs(dir_alpha*pi)[0]
#print(pi, pi_new)
hastings_ratio = dirichlet.logpdf(pi, pi_new) - dirichlet.logpdf(pi_new, pi)
return pi_new, hastings_ratio
def mvDualSlider(pi):
i, j = random.sample(range(pi.shape[0]),2 )
sum_ij = pi[i]+pi[j]
x = random.uniform(epsilon, sum_ij)
y = sum_ij -x
pi[i], pi[j] = x, y
return pi, 0.0
def mvScaler(x):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
x_new = x*c
return x_new, log_c
def mvVecScaler(X):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
X_new = X*c
return X_new, log_c
def mvSlider(x, a, b):
""" a and b are bounds
"""
x_hat = np.random.uniform(x-0.5, x+0.5)
if x_hat < a:
return 2.0*a -x_hat
    elif x_hat > b:
return 2.0*b -x_hat
else:
return x_hat
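def example_mh_step(x, log_post):
    """A minimal sketch (not part of the original move set) of one
    Metropolis-Hastings update built on mvScaler; `log_post` is any
    user-supplied log-posterior function and is an assumption made
    purely for illustration.
    """
    x_new, log_hastings = mvScaler(x)
    # acceptance ratio includes the Hastings/Jacobian correction of the move
    log_alpha = log_post(x_new) - log_post(x) + log_hastings
    if math.log(random.random()) < log_alpha:
        return x_new
    return x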
| gpl-2.0 | -2,911,197,455,575,236,000 | 21.822222 | 80 | 0.589094 | false |
The-WebOps-Club/odia-forum | pybbm_tag/views.py | 1 | 2818 | from django.shortcuts import render
from pybb.models import *
from pybbm_tag.models import Tag
from pybb.views import ForumView,AddPostView,EditPostView,TopicView
def add_tag(request,**kwargs):
# check permissions before calling this function
# in kwargs we expect the LABEL of the tag to add(not object) and the TOPIC object(not name).
topic = kwargs['topic']
tagname = kwargs['tag']
lst = Tag.objects.filter(label = tagname)
if not lst.count() == 0:
lst[0].topics.add(topic)
lst[0].save()
else:
tag = Tag(label = tagname,desc="Empty")
tag.save()
tag.topics.add(topic)
def remove_all_tags(request,**kwargs):
topic = kwargs['topic']
for i in Tag.objects.filter(topics__in = [topic]):
i.topics.remove(topic)
def remove_tag(request,**kwargs):
# check permissions before calling this function.
topic = kwargs['topic']
tagname = kwargs['tag']
lst = Tag.objects.filter(label = tagname)
lst[0].topics.remove(topic)
# tag additions to the views that are affected by tags.
class AddPostViewWrapper(AddPostView):
def post(self, request, *args, **kwargs):
		ret = super(AddPostViewWrapper, self).post(request, *args, **kwargs)
		try:
taglist = request.POST['taglist'].split('+')
#import pdb;pdb.set_trace()
for i in taglist:
add_tag(request, topic=self.object.topic, tag=i)
except KeyError:
pass
return ret
def get_context_data(self,**kwargs):
ctx = super(AddPostViewWrapper, self).get_context_data(**kwargs)
if ctx['forum']:
ctx['taglist_input'] = 1
return ctx
class ForumViewWrapper(ForumView):
def get_context_data(self):
ctx = super(ForumViewWrapper, self).get_context_data()
topic_list = ctx['topic_list']
tags = []
for i in topic_list:
tags.append(Tag.objects.filter(topics__in = [i]))
ctx['tags'] = Tag.objects.all()
return ctx
class TopicViewWrapper(TopicView):
def get_context_data(self):
ctx = super(TopicViewWrapper, self).get_context_data()
ctx['tags'] = Tag.objects.all()
return ctx
class EditPostViewWrapper(EditPostView):
def post(self, request, *args, **kwargs):
ret = super(EditPostViewWrapper, self).post(request, *args, **kwargs)
try:
taglist = request.POST['taglist'].split('+')
remove_all_tags(request, topic=self.object.topic)
for i in taglist:
add_tag(request, topic=self.object.topic, tag=i)
except KeyError:
pass
return ret
def make_tag_string(self,topic):
str = ""
for i in Tag.objects.filter(topics__in = [topic]):
str+=(i.label+"+")
if len(str) > 0:
str = str[:-1]
return str
def get_context_data(self, **kwargs):
ctx = super(EditPostViewWrapper, self).get_context_data(**kwargs)
post = ctx['post']
if post.topic.user == self.request.user:
ctx['taglist_input'] = 1
ctx['taglist_initial'] = self.make_tag_string(post.topic)
return ctx | gpl-2.0 | -5,345,528,752,191,889,000 | 28.061856 | 94 | 0.689851 | false |
orwell-int/agent-server-game-python | setup.py | 1 | 1239 | #!/usr/bin/env python
import setuptools
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
setuptools.setup(
name='orwell.agent',
version='0.0.1',
description='Agent connecting to the game server.',
author='',
author_email='',
packages=setuptools.find_packages(exclude="test"),
test_suite='nose.collector',
install_requires=['pyzmq', 'cliff'],
tests_require=['nose', 'coverage', 'mock'],
entry_points={
'console_scripts': [
'thought_police = orwell.agent.main:main',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6.0',
)
| bsd-3-clause | -2,430,673,641,645,783,000 | 29.769231 | 79 | 0.615819 | false |
tensorflow/examples | tensorflow_examples/lite/model_maker/demo/image_classification_demo_test.py | 1 | 2699 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from unittest.mock import patch
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.demo import image_classification_demo
from tflite_model_maker import image_classifier
from_folder_fn = image_classifier.DataLoader.from_folder
def patch_data_loader():
"""Patch to train partial dataset rather than all of them."""
def side_effect(*args, **kwargs):
tf.compat.v1.logging.info('Train on partial dataset')
data_loader = from_folder_fn(*args, **kwargs)
if len(data_loader) > 10: # Trim dataset to at most 10.
data_loader._size = 10
# TODO(b/171449557): Change this once the dataset is lazily loaded.
data_loader._dataset = data_loader._dataset.take(10)
return data_loader
return patch.object(
image_classifier.DataLoader, 'from_folder', side_effect=side_effect)
class ImageClassificationDemoTest(tf.test.TestCase):
def test_image_classification_demo(self):
with patch_data_loader():
with tempfile.TemporaryDirectory() as temp_dir:
# Use cached training data if exists.
data_dir = image_classification_demo.download_demo_data(
cache_dir=test_util.get_cache_dir(temp_dir, 'flower_photos.tgz'),
file_hash='6f87fb78e9cc9ab41eff2015b380011d')
tflite_filename = os.path.join(temp_dir, 'model.tflite')
label_filename = os.path.join(temp_dir, 'labels.txt')
image_classification_demo.run(
data_dir,
temp_dir,
spec='efficientnet_lite0',
epochs=1,
batch_size=1)
self.assertTrue(tf.io.gfile.exists(tflite_filename))
self.assertGreater(os.path.getsize(tflite_filename), 0)
self.assertFalse(tf.io.gfile.exists(label_filename))
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
tf.test.main()
| apache-2.0 | -3,472,814,965,351,758,300 | 34.513158 | 79 | 0.70804 | false |
MrLucasCardoso/pycards | tests/test_amex.py | 1 | 2451 | import json
import pytest
from pycards import CreditCard
from datetime import datetime
from pycards.settings import FIXTURES_PATH
@pytest.fixture(scope="session")
def data():
with open(FIXTURES_PATH) as data_file:
return json.load(data_file)['AMEX']
def test_init(data):
assert len(data) > 0
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len(data)
def test_is_valid(data):
assert all(CreditCard(card['name'], code=card['code']).is_valid for card in data)
def test_brand(data):
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len([card for card in cards if card.brand == 'Amex'])
def test_cardholder(data):
cards = [CreditCard(card['name'], code=card['code'], cardholder='TESTE DADOS') for card in data]
assert len(cards) == len([card for card in cards if card.cardholder == 'TESTE DADOS'])
def test_number(data):
numbers = [card['name'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert all([True for c in cards if c.number in numbers]) and any([True for c in cards if c.number in numbers])
def test_expires(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
assert all(True for c in cards if type(c.expires) == datetime)
def test_expires_string(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
assert all(True for c in cards if c.expires_string == '07/21') and any(True for c in cards if c.expires_string == '07/21')
def test_is_not_expired(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data][0]
assert not card.is_expired
def test_is_expired(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.is_expired
def test_code_name(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.code_name == 'CVV'
def test_code(data):
codes = [card['code'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert all([True for c in cards if c.code in codes]) and any([True for c in cards if c.code in codes])
| mit | 1,644,855,712,332,772,600 | 35.044118 | 126 | 0.669931 | false |
lichengshuang/createvhost | others/webvirtmgr/delServer.py | 1 | 1305 | #!/usr/bin/python
#-*-encoding:utf-8-*-
#author: asher
#date: 20160429 on train D909
# this script is used to delete server IPs from webvirtmgr
# without it, each server must be removed through the website; that's too slow, and not very interesting.
# use this, it will make you feel very happy
import sqlite3
try:
conn = sqlite3.connect('../webvirtmgr.sqlite3')
cur = conn.cursor()
print "Input the server ip address like:"
ips = raw_input("Ips 172.23.32:").strip()
ips1 = int(raw_input("Input start last ip num: 1:>").strip())
ips2 = int(raw_input("Input end ip num: 100:>").strip())
# jifang = str(raw_input("DataCenter like:jxq:>").strip())
# login = str(raw_input("User:admin or others:>").strip())
# password = str(raw_input("Password:>").strip())
while True:
if ips1 <= ips2:
ips1 = str(ips1)
newip = ips + "." + ips1
# jifang1 = jifang + "_" + newip
print "Del %s into database\n" % newip
cur.execute("delete from servers_compute where hostname == '%s'" % newip)
ips1 = int(ips1)
ips1 += 1
conn.commit()
else:
break
finally:
allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
for i in allservers:
print i
conn.close()
| apache-2.0 | -621,791,100,471,414,800 | 33.342105 | 95 | 0.603065 | false |
louisq/staticguru | utility/artifact_archiver.py | 1 | 4495 | """
The MIT License (MIT)
Copyright (c) 2016 Louis-Philippe Querel l_querel@encs.concordia.ca
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import glob
import os
import shutil
from Logging import logger
"""
The purpose of this utility is to clone the artifacts that have been generated through the build process, in order to preserve them.
This version would probably only work for Maven-run projects.
"""
FILTERED_EXTENSIONS = ('*.jar', '*.tar.*', '*.zip', '*.rpm')
# todo replace with an abstract solution that could be reused for the other modules to log the version that was run
artifact_archiver_version = 1
def archive(repo_path, archive_path, repo_id, commit, filter_extensions=True):
# Determine if we can access the path where the archive should be
if not _determine_access(archive_path):
logger.error("Failed to save to archive %s" % archive_path)
return False
temp_archive = os.path.join(repo_path, "%s-temp" % commit)
temp_archive_compress_file_no_ext = os.path.join(temp_archive, commit)
temp_archive_compress_file = "%s.tar.gz" % temp_archive_compress_file_no_ext
archive_repo_path = os.path.join(archive_path, repo_id)
archive_compress_file = "%s.tar.gz" % os.path.join(archive_repo_path, commit)
_clear_archive(temp_archive, archive_compress_file)
target_directories = _identify_target_directories(repo_path)
_clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions=filter_extensions)
_compress_files(temp_archive, temp_archive_compress_file_no_ext)
_move_compress_file_to_archive(archive_repo_path, temp_archive_compress_file)
# Delete the temporary folder
_clear_archive_temp(temp_archive)
return True
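# Example call (an illustrative sketch; the paths, repo id and commit hash are
# assumptions, not values used by staticguru itself):
#
#   archive("/tmp/checkouts/my-project/", "/mnt/archive", "my-project",
#           "3f2a9c1", filter_extensions=True)
#
# This compresses every Maven "target" directory of the checkout into
# /mnt/archive/my-project/3f2a9c1.tar.gz, skipping jar/tar/zip/rpm artifacts.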
def _determine_access(archive_path):
return os.path.exists(archive_path)
def _clear_archive(archive_temp, archive_compress_file):
_clear_archive_temp(archive_temp)
if os.path.exists(archive_compress_file):
os.remove(archive_compress_file)
def _clear_archive_temp(temp_archive):
if os.path.exists(temp_archive):
shutil.rmtree(temp_archive)
def _identify_target_directories(repo_path):
folder = "target"
nesting = "**/"
target_directories = glob.glob(r'%s%s' % (repo_path, folder))
compound_nesting = ""
# We need to navigate the repository to find project target folders
for count in range(5):
compound_nesting += nesting
target_directories += glob.glob(r'%s%s%s' % (repo_path, compound_nesting, folder))
return target_directories
def _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions):
# Determine if we need to filter any of the files
if filter_extensions:
ignore = shutil.ignore_patterns(FILTERED_EXTENSIONS)
else:
ignore = None
for path in target_directories:
folder = path[len(repo_path):]
shutil.copytree(path, "%s/%s" % (temp_archive, folder), ignore=ignore, symlinks=True)
def _compress_files(archive_temp, temp_archive_compress_file_no_ext):
# If the compression is changed the file extension needs to be changed as well in the parent method
shutil._make_tarball(temp_archive_compress_file_no_ext, archive_temp, compress="gzip")
def _move_compress_file_to_archive(repo_archive_path, temp_archive_compress_file):
if not os.path.exists(repo_archive_path):
os.makedirs(repo_archive_path)
shutil.move(temp_archive_compress_file, repo_archive_path)
| mit | -329,184,961,511,472,600 | 34.96 | 121 | 0.729032 | false |
MCFlowMace/Wordom | src/setup.py | 1 | 1423 | #! /usr/bin/env python
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy
import numpy
# in order to check whether lapack is present ...
import numpy.distutils.system_info as sysinfo
# Obtain the numpy include directory. This works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# wordom extension module
if len(sysinfo.get_info('lapack')) == 0:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
)
else:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
include_dirs = [numpy_include],
extra_compile_args = ["-D LAPACK"],
libraries = [ 'lapack', 'blas' ]
)
# NumyTypemapTests setup
setup( name = "wordom",
description = "wordom is a molecular structure and data manipulation program/library",
author = "Michele Seeber & colleagues",
url = "http://wordom.sf.net",
author_email= "mseeber@gmail.com",
license = "GPL",
version = "0.23",
ext_modules = [_wordom],
py_modules = ['wordom']
)
| gpl-3.0 | 8,072,262,394,954,524,000 | 32.880952 | 96 | 0.575545 | false |
montefra/dodocs | dodocs/__init__.py | 1 | 1068 | """Main function
Copyright (c) 2015 Francesco Montesano
MIT Licence
"""
import os
import sys
from dodocs.cmdline import parse
import dodocs.logger as dlog
__version__ = "0.0.1"
def main(argv=None):
"""
Main code
Parameters
----------
argv : list of strings, optional
command line arguments
"""
args = parse(argv=argv)
dlog.setLogger(args)
# make sure to reset the subcommand name
log = dlog.getLogger()
if "func" in args:
args.func(args)
log.debug("Finished")
return 0
else:
# defaults profile to list
if args.subparser_name == 'profile' and args.profile_cmd is None:
main(sys.argv[1:] + ["list"])
else:
# in the other cases suggest to run -h
msg = ("Please provide a valid command.\n"
"Type\n " + os.path.split(sys.argv[0])[1])
if args.subparser_name is not None:
msg += " " + args.subparser_name
msg += ' -h'
log.error(msg)
return 1
| mit | 1,274,893,821,913,090,000 | 21.723404 | 73 | 0.549625 | false |
mozilla/kitsune | kitsune/wiki/permissions.py | 1 | 4844 | import logging
from django.conf import settings
log = logging.getLogger("k.wiki")
# Why is this a mixin if it can only be used for the Document model?
# Good question! My only good reason is to keep the permission related
# code organized and contained in one place.
class DocumentPermissionMixin(object):
"""Adds of permission checking methods to the Document model."""
def allows(self, user, action):
"""Check if the user has the permission on the document."""
# If this is kicking up a KeyError it's probably because you typoed!
return getattr(self, "_allows_%s" % action)(user)
def _allows_create_revision(self, user):
"""Can the user create a revision for the document?"""
# For now (ever?), creating revisions isn't restricted at all.
return True
def _allows_edit(self, user):
"""Can the user edit the document?"""
# Document editing isn't restricted until it has an approved
# revision.
if not self.current_revision:
return True
# Locale leaders and reviewers can edit in their locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# And finally, fallback to the actual django permission.
return user.has_perm("wiki.change_document")
def _allows_delete(self, user):
"""Can the user delete the document?"""
# Locale leaders can delete documents in their locale.
locale = self.locale
if _is_leader(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_document")
def _allows_archive(self, user):
"""Can the user archive the document?"""
# Just use the django permission.
return user.has_perm("wiki.archive_document")
def _allows_edit_keywords(self, user):
"""Can the user edit the document's keywords?"""
# If the document is in the default locale, just use the
# django permission.
# Editing keywords isn't restricted in other locales.
return self.locale != settings.WIKI_DEFAULT_LANGUAGE or user.has_perm("wiki.edit_keywords")
def _allows_edit_needs_change(self, user):
"""Can the user edit the needs change fields for the document?"""
# If the document is in the default locale, just use the
# django permission.
# Needs change isn't used for other locales (yet?).
return self.locale == settings.WIKI_DEFAULT_LANGUAGE and user.has_perm(
"wiki.edit_needs_change"
)
def _allows_mark_ready_for_l10n(self, user):
""""Can the user mark the document as ready for localization?"""
# If the document is localizable and the user has the django
# permission, then the user can mark as ready for l10n.
return self.is_localizable and user.has_perm("wiki.mark_ready_for_l10n")
def _allows_review_revision(self, user):
"""Can the user review a revision for the document?"""
# Locale leaders and reviewers can review revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.review_revision")
def _allows_delete_revision(self, user):
"""Can the user delete a document's revisions?"""
# Locale leaders and reviewers can delete revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_revision")
def _is_leader(locale, user):
"""Checks if the user is a leader for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.leaders.all()
def _is_reviewer(locale, user):
"""Checks if the user is a reviewer for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.reviewers.all()
| bsd-3-clause | 3,488,450,285,851,385,000 | 35.69697 | 99 | 0.652147 | false |
aussendorf/bareos-fd-python-plugins | plugin/BareosFdPluginBaseclass.py | 1 | 5778 | #This file is now part of the main Bareos repo. Do not use this version, use the package bareos-filedaemon-python-plugin instead
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Baseclass for Bareos python plugins
# Functions taken and adapted from bareos-fd.py
# (c) Bareos GmbH & Co. KG, Maik Aussendorf
# AGPL v.3
from bareosfd import *
from bareos_fd_consts import *
from io import open
from os import O_WRONLY, O_CREAT
class BareosFdPluginBaseclass:
''' Bareos python plugin base class '''
def __init__(self, context, plugindef):
DebugMessage(context, 100, "Constructor called in module " + __name__ + "\n");
events = [];
events.append(bEventType['bEventJobEnd']);
events.append(bEventType['bEventEndBackupJob']);
events.append(bEventType['bEventEndFileSet']);
events.append(bEventType['bEventHandleBackupFile']);
RegisterEvents(context, events);
# get some static Bareos values
self.fdname = GetValue(context, bVariable['bVarFDName']);
self.jobId = GetValue(context, bVariable['bVarJobId']);
self.client = GetValue(context, bVariable['bVarClient']);
self.level = GetValue(context, bVariable['bVarLevel']);
self.jobName = GetValue(context, bVariable['bVarJobName']);
self.workingdir = GetValue(context, bVariable['bVarWorkingDir']);
DebugMessage(context, 100, "FDName = " + self.fdname + " - BareosFdPluginBaseclass\n");
DebugMessage(context, 100, "WorkingDir = " + self.workingdir + " jobId: " + str(self.jobId) + "\n");
def parse_plugin_definition(self,context, plugindef):
DebugMessage(context, 100, "plugin def parser called with " + plugindef + "\n");
# Parse plugin options into a dict
self.options = dict();
plugin_options = plugindef.split(":");
for current_option in plugin_options:
key,sep,val = current_option.partition("=");
DebugMessage(context, 100, "key:val: " + key + ':' + val + "\n");
if val == '':
continue;
else:
self.options[key] = val;
        # you should overload this method with your own and do option checking here;
        # return bRCs['bRC_Error'] if the options are not ok.
        # Better: call super's parse_plugin_definition in your own class and run
        # sanity checks on self.options afterwards
return bRCs['bRC_OK'];
def plugin_io(self, context, IOP):
DebugMessage(context, 100, "plugin_io called with " + str(IOP) + "\n");
FNAME = IOP.fname;
if IOP.func == bIOPS['IO_OPEN']:
try:
if IOP.flags & (O_CREAT | O_WRONLY):
self.file = open(FNAME, 'wb');
else:
self.file = open(FNAME, 'rb');
except:
IOP.status = -1;
return bRCs['bRC_Error'];
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_CLOSE']:
self.file.close();
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_SEEK']:
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_READ']:
IOP.buf = bytearray(IOP.count);
IOP.status = self.file.readinto(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_WRITE']:
IOP.status = self.file.write(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
def handle_plugin_event(self, context, event):
if event == bEventType['bEventJobEnd']:
DebugMessage(context, 100, "handle_plugin_event called with bEventJobEnd\n");
elif event == bEventType['bEventEndBackupJob']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndBackupJob\n");
elif event == bEventType['bEventEndFileSet']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndFileSet\n");
else:
DebugMessage(context, 100, "handle_plugin_event called with event" + str(event) + "\n");
return bRCs['bRC_OK'];
def start_backup_file(self,context, savepkt):
DebugMessage(context, 100, "start_backup called\n");
# Base method, we do not add anything, overload this method with your implementation to add files to backup fileset
return bRCs['bRC_Skip'];
def end_backup_file(self, context):
DebugMessage(context, 100, "end_backup_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def start_restore_file(self, context, cmd):
DebugMessage(context, 100, "start_restore_file() entry point in Python called with" + str(cmd) + "\n")
return bRCs['bRC_OK'];
def end_restore_file(self,context):
DebugMessage(context, 100, "end_restore_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def restore_object_data(self, context, ROP):
DebugMessage(context, 100, "restore_object_data called with " + str(ROP) + "\n");
return bRCs['bRC_OK'];
def create_file(self,context, restorepkt):
DebugMessage(context, 100, "create_file() entry point in Python called with" + str(restorepkt) + "\n")
restorepkt.create_status = bCFs['CF_EXTRACT'];
return bRCs['bRC_OK'];
def check_file(self,context, fname):
DebugMessage(context, 100, "check_file() entry point in Python called with" + str(fname) + "\n")
return bRCs['bRC_OK'];
def handle_backup_file(self,context, savepkt):
DebugMessage(context, 100, "handle_backup_file called with " + str(savepkt) + "\n");
return bRCs['bRC_OK'];
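# Minimal sketch of a derived plugin (illustrative only; the option name
# "filename" below is an assumption, not something this base class defines):
#
#   class BareosFdMyPlugin(BareosFdPluginBaseclass):
#       def parse_plugin_definition(self, context, plugindef):
#           BareosFdPluginBaseclass.parse_plugin_definition(self, context, plugindef);
#           if 'filename' not in self.options:
#               DebugMessage(context, 100, "Option 'filename' is missing\n");
#               return bRCs['bRC_Error'];
#           return bRCs['bRC_OK'];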
# vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
| agpl-3.0 | -9,126,865,445,505,169,000 | 40.271429 | 132 | 0.607823 | false |
uclouvain/osis_louvain | manage.py | 1 | 1269 | #!/usr/bin/env python
import os
import sys
import dotenv
if __name__ == "__main__":
if 'test' in sys.argv:
os.environ.setdefault('TESTING', 'True')
dotenv.read_dotenv()
SETTINGS_FILE = os.environ.get('DJANGO_SETTINGS_MODULE', 'backoffice.settings.local')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_FILE)
from django.core.management import execute_from_command_line
try:
execute_from_command_line(sys.argv)
except KeyError as ke:
print("Error loading application.")
print("The following environment var is not defined : {}".format(str(ke)))
print("Check the following possible causes :")
print(" - You don't have a .env file. You can copy .env.example to .env to use default")
print(" - Mandatory variables are not defined in your .env file.")
sys.exit("SettingsKeyError")
except ImportError as ie:
print("Error loading application : {}".format(str(ie)))
print("Check the following possible causes :")
print(" - The DJANGO_SETTINGS_MODULE defined in your .env doesn't exist")
print(" - No DJANGO_SETTINGS_MODULE is defined and the default 'backoffice.settings.local' doesn't exist ")
sys.exit("DjangoSettingsError")
| agpl-3.0 | 1,813,836,332,511,796,500 | 41.3 | 115 | 0.666667 | false |
yuyichao/pyscical | pyscical/utils.py | 1 | 1930 | # Copyright (C) 2012~2014 by Yichao Yu
# yyc1992@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def cffi_ptr(obj, _ffi, writable=False, retain=False):
if isinstance(obj, bytes):
if writable:
# bytes is not writable
raise TypeError('expected an object with a writable '
'buffer interface.')
if retain:
buf = _ffi.new('char[]', obj)
return (buf, len(obj), buf)
return (obj, len(obj), obj)
elif isinstance(obj, np.ndarray):
# numpy array
return (_ffi.cast('void*', obj.__array_interface__['data'][0]),
obj.nbytes, obj)
elif isinstance(obj, np.generic):
if writable or retain:
raise TypeError('expected an object with a writable '
'buffer interface.')
# numpy scalar
#
# * obj.__array_interface__ exists in CPython although requires
# holding a reference to the dynamically created
# __array_interface__ object
#
# * does not exist (yet?) in numpypy.
s_array = obj[()]
return (_ffi.cast('void*', s_array.__array_interface__['data'][0]),
s_array.nbytes, s_array)
raise TypeError("Only numpy arrays and bytes can be converted")
| gpl-3.0 | -2,692,633,605,925,007,000 | 40.06383 | 75 | 0.622798 | false |
potzenheimer/meetshaus | src/meetshaus.sitetheme/meetshaus/sitetheme/tests.py | 1 | 1419 | import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import meetshaus.sitetheme
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(meetshaus.sitetheme)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='meetshaus.sitetheme.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| mit | -5,397,782,054,567,176,000 | 24.8 | 61 | 0.653982 | false |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/tests_over_3dmask_generator.py | 1 | 1589 | import sys
import os
from lib.data_loader import utils_mask3d
sys.path.append(os.path.dirname(os.getcwd()))
from lib.utils import output_utils
from lib.data_loader import mri_atlas
from lib.data_loader import pet_atlas
from lib.data_loader import PET_stack_NORAD
from lib.data_loader import MRI_stack_NORAD
from lib.utils.os_aux import create_directories
import settings
region = 75
#images = "MRI"
images = "PET"
path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_masks3d = os.path.join(path_folder3D, "masks3D")
path_mask = os.path.join(
path_folder_masks3d, "{1}_region:{0}".format(region, images))
create_directories([path_folder3D, path_folder_masks3d])
atlas = None
reshape_kind = None
colour_kind = None
stack_dict = None
if images == "MRI":
stack_dict = MRI_stack_NORAD.get_gm_stack()
reshape_kind = "A"
colour_kind = "Greys"
atlas = mri_atlas.load_atlas_mri()
elif images == "PET":
stack_dict = PET_stack_NORAD.get_full_stack()
reshape_kind = "F"
colour_kind = "jet"
total_size = stack_dict['total_size']
imgsize = stack_dict['imgsize']
voxels_index = stack_dict['voxel_index']
map_region_voxels = atlas[region]  # indices referring to the no-background voxels
no_bg_region_voxels_index = voxels_index[map_region_voxels]
mask3d = utils_mask3d.generate_region_3dmaskatlas(
no_bg_region_voxels_index=no_bg_region_voxels_index,
reshape_kind=reshape_kind,
imgsize=imgsize,
totalsize=total_size)
output_utils.from_3d_image_to_nifti_file(
path_to_save=path_mask,
image3d=mask3d)
| gpl-2.0 | 1,206,515,291,192,381,200 | 27.375 | 69 | 0.713027 | false |
bbcf/bbcflib | bein/tests/test_bein.py | 1 | 13588 | import socket
import re
import sys
import random
from unittest2 import TestCase, TestSuite, main, TestLoader, skipIf
from bein import *
from bein.util import touch
M = MiniLIMS("testing_lims")
def hostname_contains(pattern):
hostname = socket.gethostbyaddr(socket.gethostname())[0]
if re.search(pattern, hostname) == None:
return False
else:
return True
try:
if hostname_contains('vital-it.ch'):
not_vital_it = False
else:
not_vital_it = True
except:
not_vital_it = True
@program
def count_lines(filename):
"""Count the number of lines in *filename* (equivalent to ``wc -l``)."""
def parse_output(p):
m = re.search(r'^\s*(\d+)\s+' + filename + r'\s*$',
''.join(p.stdout))
if m == None:
return None
else:
return int(m.groups()[-1]) # in case of a weird line in LSF
return {"arguments": ["wc","-l",filename],
"return_value": parse_output}
class TestProgramBinding(TestCase):
def test_binding_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
self.assertEqual(count_lines(ex, 'boris'), 3)
def test_local_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._local(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT.")
def test_lsf_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._lsf(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_nonblocking_with_via_local(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='local')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT")
def test_nonblocking_with_via_lsf(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='lsf')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_syntaxerror_outside_execution(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch(ex)
def test_syntaxerror_outside_execution_nonblocking(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch.nonblocking(ex)
class TestUniqueFilenameIn(TestCase):
def test_state_determines_filename(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
random.setstate(st)
g = unique_filename_in()
self.assertEqual(f, g)
def test_unique_filename_exact_match(self):
with execution(None) as ex:
st = random.getstate()
f = touch(ex)
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
def test_unique_filename_beginnings_match(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
touch(ex, f + 'abcdefg')
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
class TestMiniLIMS(TestCase):
def test_resolve_alias_exception_on_no_file(self):
with execution(None) as ex:
M = MiniLIMS("boris")
self.assertRaises(ValueError, M.resolve_alias, 55)
def test_resolve_alias_returns_int_if_exists(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
self.assertEqual(M.resolve_alias(a), a)
def test_resolve_alias_with_alias(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
M.add_alias(a, 'hilda')
self.assertEqual(M.resolve_alias('hilda'), a)
def test_path_to_file_on_execution(self):
with execution(None) as ignoreme:
f = touch(ignoreme)
M = MiniLIMS("boris")
fid = M.import_file(f)
mpath = M.path_to_file(fid)
with execution(M) as ex:
fpath = ex.path_to_file(fid)
self.assertEqual(mpath, fpath)
def test_search_files(self):
f_desc = unique_filename_in()
t1 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_id = M.import_file("../LICENSE", description=f_desc)
t2 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_found = M.search_files(with_text="LICENSE", with_description=f_desc, older_than=t2, source="import", newer_than=t1)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
f_desc = {"name":"test_search_files_by_dict", "m":5, "n":15}
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.search_files(with_description=f_desc)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
def test_search_executions(self):
with execution(M, description="desc_test") as ex:
pass
ex_found = M.search_executions(with_description="desc_test")
self.assertIn(ex.id,ex_found)
M.delete_execution(ex.id)
ex_desc = {"name":"test_search_ex_by_dict", "m":5, "n":15}
with execution(M, description=ex_desc) as ex:
pass
ex_found = M.search_executions(with_description=ex_desc)
self.assertIn(ex.id, ex_found)
try:
with execution(M, description="desc_test_fail") as ex_nofail:
3/0
except: pass
ex_found_nofail = M.search_executions(with_description="desc_test", fails=False)
for e in ex_found_nofail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNone(error)
ex_found_fail = M.search_executions(with_description="desc_test", fails=True)
for e in ex_found_fail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNotNone(error)
M.delete_execution(ex.id)
M.delete_execution(ex_nofail.id)
def test_browse_files(self):
f_desc = "browse_file_test"
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.browse_files(with_description=f_desc)
#self.assertIn(f_id,f_found)
M.delete_file(f_id)
def test_browse_executions(self):
ex_desc = "browse_ex_test"
with execution(M, description=ex_desc) as ex:
touch(ex,"boris")
ex_found = M.browse_executions(with_description=ex_desc)
#self.assertIs(ex.id,ex_found)
M.delete_execution(ex.id)
class TestExportFile(TestCase):
def test_export_file(self):
filea = M.import_file("../LICENSE") #file ID
fileb = M.import_file("../doc/bein.rst")
testdir = "testing.files"
if not os.path.isdir(testdir):
os.mkdir(testdir)
M.associate_file(fileb,filea,template="%s.linked")
M.export_file(filea, dst=os.path.join(testdir,"exportedfile"), with_associated=True) #test with file name given
self.assertTrue(os.path.isfile(os.path.join(testdir,"exportedfile"+".linked")))
os.remove(os.path.join(testdir,"exportedfile"))
os.remove(os.path.join(testdir,"exportedfile"+".linked"))
M.export_file(filea, dst=testdir, with_associated=True) #test with directory given
filename = M.fetch_file(filea)['repository_name']
self.assertTrue(os.path.isfile(os.path.join(testdir, filename +".linked")))
os.remove(os.path.join(testdir, filename))
os.remove(os.path.join(testdir, filename +".linked"))
@program
def echo(s):
return {'arguments': ['echo',str(s)],
'return_value': None}
class TestStdoutStderrRedirect(TestCase):
def test_stdout_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
echo(ex, "boris!", stdout=f)
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
def test_stdout_local_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
m = echo.nonblocking(ex, "boris!", stdout=f)
m.wait()
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
class TestNoSuchProgramError(TestCase):
@program
def nonexistent():
return {"arguments": ["meepbarf","hilda"],
"return_value": None}
def test_nonexistent(self):
with execution(None) as ex:
self.assertRaises(ValueError, self.nonexistent, ex)
def test_nonexistent_local(self):
with execution(None) as ex:
f = self.nonexistent.nonblocking(ex, via="local")
self.assertRaises(ValueError, f.wait)
class TestImmutabilityDropped(TestCase):
def test_immutability_dropped(self):
executions = []
with execution(M) as ex:
touch(ex, "boris")
ex.add("boris")
exid1 = ex.id
borisid = M.search_files(source=('execution',ex.id))[0]
self.assertFalse(M.fetch_file(borisid)['immutable'])
with execution(M) as ex:
ex.use(borisid)
exid2 = ex.id
self.assertTrue(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid2)
self.assertFalse(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid1)
self.assertEqual(M.search_files(source=('execution',exid1)), [])
class TestAssociatePreservesFilenames(TestCase):
def test_associate_with_names(self):
try:
with execution(M) as ex:
touch(ex, "boris")
touch(ex, "hilda")
ex.add("boris")
ex.add("hilda", associate_to_filename="boris", template="%s.meep")
boris_id = M.search_files(source=('execution',ex.id), with_text="boris")[0]
hilda_id = M.search_files(source=('execution',ex.id), with_text="hilda")[0]
boris_name = M.fetch_file(boris_id)['repository_name']
hilda_name = M.fetch_file(hilda_id)['repository_name']
self.assertEqual("%s.meep" % boris_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
def test_associate_with_id(self):
try:
fid = M.import_file('test.py')
with execution(M) as ex:
touch(ex, "hilda")
ex.add("hilda", associate_to_id=fid, template="%s.meep")
hilda_id = M.search_files(source=('execution',ex.id))[0]
hilda_name = M.fetch_file(hilda_id)['repository_name']
fid_name = M.fetch_file(fid)['repository_name']
self.assertEqual("%s.meep" % fid_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
M.delete_file(fid)
except:
pass
def test_hierarchical_association(self):
try:
with execution(M) as ex:
touch(ex, "a")
touch(ex, "b")
touch(ex, "c")
ex.add("a")
ex.add("b", associate_to_filename="a", template="%s.step")
ex.add("c", associate_to_filename="b", template="%s.step")
a_id = M.search_files(source=('execution',ex.id), with_text='a')[0]
b_id = M.search_files(source=('execution',ex.id), with_text='b')[0]
c_id = M.search_files(source=('execution',ex.id), with_text='c')[0]
a_name = M.fetch_file(a_id)['repository_name']
b_name = M.fetch_file(b_id)['repository_name']
c_name = M.fetch_file(c_id)['repository_name']
self.assertEqual("%s.step" % a_name, b_name)
self.assertEqual("%s.step.step" % a_name, c_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
#def test_given(tests):
# module = sys.modules[__name__]
# if tests == None:
# defaultTest = None
# else:
# loader = TestLoader()
# defaultTest = TestSuite()
# tests = loader.loadTestsFromNames(tests, module)
# defaultTest.addTests(tests)
# main(defaultTest=defaultTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
test_given(sys.argv[1:])
else:
test_given(None)
| gpl-3.0 | 8,427,574,205,783,375,000 | 35.04244 | 125 | 0.562629 | false |
stevenwudi/Kernelized_Correlation_Filter | CNN_training.py | 1 | 3640 | import numpy as np
from keras.optimizers import SGD
from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \
cnn_cifar_small_batchnormalisation
from models.DataLoader import DataLoader
from scripts.progress_bar import printProgress
from time import time, localtime
# this is a predefined dataloader
loader = DataLoader(batch_size=32)
# construct the model here (pre-defined model)
model = cnn_cifar_small_batchnormalisation(loader.image_shape)
print(model.name)
nb_epoch = 200
early_stopping = True
early_stopping_count = 0
early_stopping_wait = 3
train_loss = []
valid_loss = []
learning_rate = [0.0001, 0.001, 0.01]
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
# load validation data from the h5py file (heavy lifting here)
x_valid, y_valid = loader.get_valid()
best_valid = np.inf
for e in range(nb_epoch):
print("epoch %d" % e)
loss_list = []
time_list = []
time_start = time()
for i in range(loader.n_iter_train):
time_start_batch = time()
X_batch, Y_batch = loader.next_train_batch()
loss_list.append(model.train_on_batch(X_batch, Y_batch))
# calculate some time information
time_list.append(time() - time_start_batch)
eta = (loader.n_iter_train - i) * np.array(time_list).mean()
printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.'%(np.array(loss_list).mean(), eta), barLength=50)
printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50)
train_loss.append(np.asarray(loss_list).mean())
print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start))
valid_loss.append(model.evaluate(x_valid, y_valid))
print('valid loss is %f' % valid_loss[-1])
if best_valid > valid_loss[-1]:
early_stopping_count = 0
print('saving best valid result...')
best_valid = valid_loss[-1]
model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5')
else:
# we wait for early stopping loop until a certain time
early_stopping_count += 1
if early_stopping_count > early_stopping_wait:
early_stopping_count = 0
if len(learning_rate) > 1:
learning_rate.pop()
print('decreasing the learning rate to: %f'%learning_rate[-1])
model.optimizer.lr.set_value(learning_rate[-1])
else:
break
lt = localtime()
lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \
+str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\
+str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2)
np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss)
np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss)
model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5')
print("done")
#### we show some visualisation here
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt')
valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt')
plt.plot(train_loss, 'b')
plt.plot(valid_loss, 'r')
blue_label = mpatches.Patch(color='blue', label='train_loss')
red_label = mpatches.Patch(color='red', label='valid_loss')
plt.legend(handles=[blue_label, red_label])
| gpl-3.0 | -7,064,727,878,876,511,000 | 39.898876 | 161 | 0.657143 | false |
Cubitect/ASMModSuit | ASMVillageMarker.py | 1 | 5318 | import SRenderLib
from asmutils import *
def create_mod(util):
print '\nSearching for mappings for ASMVillageMarker...'
SRenderLib.setup_lib(util)
lines = util.readj('World')
pos = findOps(lines,0,[['.field','protected',';'],['.field','protected','Z'],['.field','protected',';'],['.field','protected',';']])
util.setmap('VillageCollection',betweenr(lines[pos],'L',';'))
util.setmap('World.villageCollectionObj',endw(lines[pos],2))
pos = findOps(lines,pos+1,[['.method','public','()L'+util.getmap('VillageCollection')]])
if pos is not None:
util.setmap('World.getVillageCollection',endw(lines[pos],3))
lines = util.readj('VillageCollection')
pos = findOps(lines,0,[['.method','public','()Ljava/util/List']])
util.setmap('VillageCollection.getVillageList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('Village',betweenr(lines[pos],')L',';'))
lines = util.readj('Village')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('Village.getCenter',endw(lines[pos],3))
util.setmap('BlockPos',betweenr(lines[pos],')L',';'))
pos = findOps(lines,pos+1,[['.method','public','()I']])
util.setmap('Village.getVillageRadius',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public','()Ljava/util/List']])
util.setmap('Village.getVillageDoorInfoList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('VillageDoorInfo',betweenr(lines[pos],')L',';'))
lines = util.readj('VillageDoorInfo')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('VillageDoorInfo.getDoorBlockPos',endw(lines[pos],3))
lines = util.readj('BlockPos')
pos = findOps(lines,0,[['.super']])
util.setmap('Vec3i',endw(lines[pos],1))
lines = util.readj('Vec3i')
pos = findOps(lines,0, [['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getX',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getY',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getZ',endw(lines[pos-1],3))
print 'Applying ASMVillageMarker patch...'
util.setmap('ASMVillageMarker','villagemarker/ASMVillageMarker')
lines = util.readt('ASMVillageMarker')
lines = '\1'.join(lines)
lines = lines.replace('net/minecraft/server/integrated/IntegratedServer', util.getmap('IntegratedServer'))
lines = lines.replace('net/minecraft/client/entity/EntityPlayerSP', util.getmap('EntityPlayerSP'))
lines = lines.replace('net/minecraft/client/Minecraft', util.getmap('Minecraft'))
lines = lines.replace('net/minecraft/world/WorldServer', util.getmap('WorldServer'))
lines = lines.replace('net/minecraft/util/math/BlockPos', util.getmap('BlockPos'))
lines = lines.replace('net/minecraft/village/VillageCollection', util.getmap('VillageCollection'))
lines = lines.replace('net/minecraft/village/VillageDoorInfo', util.getmap('VillageDoorInfo'))
lines = lines.replace('net/minecraft/village/Village', util.getmap('Village'))
lines = lines.replace('thePlayer', util.getmap('Minecraft.thePlayer'))
lines = lines.replace('dimension', util.getmap('Entity.dimension'))
lines = lines.replace('isSingleplayer', util.getmap('Minecraft.isSingleplayer'))
lines = lines.replace('worldServerForDimension', util.getmap('MinecraftServer.worldServerForDimension'))
lines = lines.replace('getVillageDoorInfoList', util.getmap('Village.getVillageDoorInfoList'))
lines = lines.replace('getVillageCollection', util.getmap('World.getVillageCollection'))
lines = lines.replace('getVillageRadius', util.getmap('Village.getVillageRadius'))
lines = lines.replace('getVillageList', util.getmap('VillageCollection.getVillageList'))
lines = lines.replace('getDoorBlockPos', util.getmap('VillageDoorInfo.getDoorBlockPos'))
lines = lines.replace('getIntegratedServer', util.getmap('Minecraft.getIntegratedServer'))
lines = lines.replace('getMinecraft', util.getmap('Minecraft.getMinecraft'))
lines = lines.replace('getCenter', util.getmap('Village.getCenter'))
lines = lines.replace('getX', util.getmap('Vec3i.getX'))
lines = lines.replace('getY', util.getmap('Vec3i.getY'))
lines = lines.replace('getZ', util.getmap('Vec3i.getZ'))
lines = lines.split('\1')
util.write2mod('ASMVillageMarker',lines)
print 'Injecting render call...'
lines = util.readj('EntityRenderer')
pos = 0
while True:
pos = findOps(lines,pos+1,[['ldc','culling']])
if pos is None:
break
pos = findOps(lines,pos+1,[['dload'],['dload'],['dload']])
playerX = endw(lines[pos-2],1)
playerY = endw(lines[pos-1],1)
playerZ = endw(lines[pos ],1)
pos = findOps(lines,pos+1,[['ldc','aboveClouds']])
pos = goBackTo(lines,pos,['invokevirtual'])
lines.insert(pos+1,'dload '+playerX+'\n')
lines.insert(pos+2,'dload '+playerY+'\n')
lines.insert(pos+3,'dload '+playerZ+'\n')
lines.insert(pos+4,'invokestatic Method '+util.getmap('ASMVillageMarker')+' render (DDD)V\n')
util.write2mod('EntityRenderer',lines)
| gpl-3.0 | 2,978,141,941,346,353,700 | 52.717172 | 136 | 0.668672 | false |
shadowmint/nwidget | lib/cocos2d-0.5.5/test/test_menu_items.py | 1 | 2268 | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "menu items, ToggleMenuItem, MultipleMenuItem, MenuItem, EntryMenuItem, ImageMenuItem, ColorMenuItem"
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from operator import setslice
def printf(*args):
sys.stdout.write(''.join([str(x) for x in args])+'\n')
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("Test Menu Items")
# then add the items
item1= ToggleMenuItem('ToggleMenuItem: ', self.on_toggle_callback, True )
resolutions = ['320x200','640x480','800x600', '1024x768', '1200x1024']
item2= MultipleMenuItem('MultipleMenuItem: ',
self.on_multiple_callback,
resolutions)
item3 = MenuItem('MenuItem', self.on_callback )
item4 = EntryMenuItem('EntryMenuItem:', self.on_entry_callback, 'value',
max_length=8)
item5 = ImageMenuItem('imagemenuitem.png', self.on_image_callback)
colors = [(255, 255, 255), (129, 255, 100), (50, 50, 100), (255, 200, 150)]
item6 = ColorMenuItem('ColorMenuItem:', self.on_color_callback, colors)
self.create_menu( [item1,item2,item3,item4,item5,item6] )
def on_quit( self ):
pyglet.app.exit()
def on_multiple_callback(self, idx ):
print 'multiple item callback', idx
def on_toggle_callback(self, b ):
print 'toggle item callback', b
def on_callback(self ):
print 'item callback'
def on_entry_callback (self, value):
print 'entry item callback', value
def on_image_callback (self):
print 'image item callback'
def on_color_callback(self, value):
print 'color item callback:', value
def main():
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
if __name__ == '__main__':
main()
| apache-2.0 | 510,545,519,956,225,660 | 28.648649 | 108 | 0.609788 | false |
jpwhite3/python-whirlwind-tour | examples/lab4.py | 1 | 1539 | from __future__ import print_function
import sys
import re
import glob
import argparse
def eprint(*args, **kwargs):
# Print to STDERR instead of STDOUT
print(*args, file=sys.stderr, **kwargs)
def grep(expression, filepath, ignorecase=False, invert=False):
raw_expression = re.escape(expression)
with open(filepath) as file:
for line in file:
# Enable case matching?
if ignorecase:
matches = re.search(raw_expression, line, re.I)
else:
matches = re.search(raw_expression, line)
# Invert matches if need be and print
if matches and not invert:
print(line)
elif invert and not matches:
print(line)
def main():
parser = argparse.ArgumentParser(description='This is a pure Python based clone of the GREP command')
parser.add_argument('expression', action="store", type=str, help="Regular expression to match against")
parser.add_argument('filepath', action="store", type=str, help="Path to file to search in. supports wildcard globs")
parser.add_argument('-i', action="store_true", default=False, dest="ignorecase", help="Ignore case")
parser.add_argument('-v', action="store_true", default=False, dest="invert", help="Show lines that don't match")
args = parser.parse_args()
file_list = glob.glob(args.filepath)
for f in file_list:
if len(file_list) > 1:
eprint("\nResults for file: %s" % f)
eprint("-"*(len(f)+18))
grep(args.expression, f, ignorecase=args.ignorecase, invert=args.invert)
if __name__ == '__main__':
main()
| cc0-1.0 | -776,733,871,353,268,600 | 30.744681 | 117 | 0.680962 | false |
ayoubg/gem5-graphics | gem5-gpu/tests/quick/se_gpu/10.backprop/test.py | 1 | 1654 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Joel Hestness
options.clusters = 4
options.cmd = 'gem5_gpu_backprop'
options.options = '256'
| bsd-3-clause | 1,989,801,234,938,213,000 | 52.354839 | 72 | 0.792019 | false |
bcarr092/pyCovertAudio | src/pyCovertAudio/BFSKModulator.py | 1 | 2146 | from pyCovertAudio_lib import *
from BaseModulator import BaseModulator
from SignalFunctions import SignalFunctions
class BFSKModulator(BaseModulator):
def __init__(
self, bitsPerSymbol, sampleRate, samplesPerSymbol,
symbolExpansionFactor, separationIntervals, configuration
):
BaseModulator.__init__(
self,
bitsPerSymbol,
sampleRate,
samplesPerSymbol,
symbolExpansionFactor,
separationIntervals,
configuration
)
(
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth
) = \
python_BFSK_determine_frequencies(
self.samplesPerSymbol,
self.sampleRate,
self.carrierFrequency,
self.separationIntervals
)
def modulate(self, symbolSequence, signal, sentinel=None):
symbolSignalLength = self.samplesPerSymbol * self.symbolExpansionFactor
for symbol in symbolSequence:
symbolFrequency = self.carrierFrequency
if(symbol == 1):
symbolFrequency += self.symbol1Frequency
else:
symbolFrequency += self.symbol0Frequency
x = \
SignalFunctions.modulateFSK(
symbolSignalLength, self.sampleRate, [symbolFrequency]
)
signal.extend(x[: self.samplesPerSymbol])
signal.extend(
[0.0 for i in range(
(self.symbolExpansionFactor - 1) * self.samplesPerSymbol)]
)
def toString(self):
return (
"Modulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency:\t\t"
"%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency"
" separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n%s"
% (
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth,
BaseModulator.toString(self)
)
)
| apache-2.0 | -7,536,725,190,212,510,000 | 29.225352 | 79 | 0.55685 | false |
modera/mcloud | mcloud/plugins/monitor.py | 1 | 1101 | import inject
from mcloud.application import ApplicationController
from mcloud.events import EventBus
from mcloud.plugin import IMcloudPlugin
from mcloud.plugins import Plugin
from mcloud.txdocker import IDockerClient
from twisted.internet import reactor
from twisted.python import log
from zope.interface import implements
class DockerMonitorPlugin(Plugin):
"""
Monitors docker events and emmits "containers.updated" event when non-internal
containers change their state.
"""
implements(IMcloudPlugin)
client = inject.attr(IDockerClient)
event_bus = inject.attr(EventBus)
app_controller = inject.attr(ApplicationController)
def setup(self):
# reactor.callLater(0, self.attach_to_events)
pass
def on_event(self, event):
if not self.app_controller.is_internal(event['id']):
log.msg('New docker event: %s' % event)
self.event_bus.fire_event('containers.updated', event)
def attach_to_events(self, *args):
log.msg('Start monitoring docker events')
return self.client.events(self.on_event)
| apache-2.0 | -7,506,854,949,518,193,000 | 31.382353 | 82 | 0.722071 | false |
dhhagan/PAM | Python/PAM.py | 1 | 5037 | #PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
FILE.close
return cols, indexToName, lineNum, shortLines
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
fig = plt.figure('Gas Concentration Readings for East St.Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
FILE.close
return goodFiles, irregFiles
| mit | -279,067,604,541,142,340 | 27.297753 | 97 | 0.561842 | false |
sguazt/prometheus | tools/giws/datatypes/stringDataGiws.py | 1 | 10567 | #!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <sylvestre.ledru@inria.fr> <sylvestre@ledru.info>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from datatypes.dataGiws import dataGiws
from configGiws import configGiws
from JNIFrameWork import JNIFrameWork
class stringDataGiws(dataGiws):
nativeType="char *"
callMethod="CallObjectMethod"
callStaticMethod="CallStaticObjectMethod"
temporaryVariableName="myStringBuffer"
def getTypeSignature(self):
return "Ljava/lang/String;"
def getJavaTypeSyntax(self):
if self.isArray():
return "jobjectArray"
else:
return "jstring"
def getRealJavaType(self):
return "java.lang.String"
def getDescription(self):
return "Java String"
def getNativeType(self, ForceNotArray=False, UseConst=False):
if self.isArray():
if UseConst:
pointer = " const*"
else:
pointer = "*"
return ("char" + pointer) + pointer * self.getDimensionArray()
else:
if UseConst:
pointer = " const*"
else:
pointer = "*"
return "char" + pointer
def __errorMemoryString(self, detachThread):
# Management of the error when not enought memory to create the string
if configGiws().getThrowsException():
errorMgntMemBis="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMemBis="""std::cerr << "Could not convert C string to Java UTF string, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
return errorMgntMemBis
def specificPreProcessing(self, parameter, detachThread):
""" Overrides the preprocessing of the array """
name=parameter.getName()
# Management of the error when not enought memory to create the string
if configGiws().getThrowsException():
errorMgntMem="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMem="""std::cerr << "Could not allocate Java string array, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
errorMgntMemBis = self.__errorMemoryString(detachThread)
if self.isArray():
if self.getDimensionArray() == 1:
return """
// create java array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, stringArrayClass, NULL);
if (%s_ == NULL)
{
%s
}
// convert each char * to java strings and fill the java array.
for ( int i = 0; i < %sSize; i++)
{
jstring TempString = curEnv->NewStringUTF( %s[i] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %s_, i, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}"""%(name,name,name,errorMgntMem,name,name,errorMgntMemBis,name)
else:
return """
// create java array of array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, curEnv->FindClass("[Ljava/lang/String;"), NULL);
if (%s_ == NULL)
{
%s
}
for ( int i = 0; i < %sSize; i++)
{
jobjectArray %sLocal = curEnv->NewObjectArray( %sSizeCol, stringArrayClass, NULL);
// convert each char * to java strings and fill the java array.
for ( int j = 0; j < %sSizeCol; j++) {
jstring TempString = curEnv->NewStringUTF( %s[i][j] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %sLocal, j, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}
curEnv->SetObjectArrayElement(%s_, i, %sLocal);
curEnv->DeleteLocalRef(%sLocal);
}"""%(name,name,name,errorMgntMem,name,name,name,name,name,errorMgntMemBis,name,name,name,name)
else:
# Need to store is for the post processing (delete)
self.parameterName=name
tempName=name+"_"
return """
jstring %s = curEnv->NewStringUTF( %s );
if (%s != NULL && %s == NULL)
{
%s
}
"""%(tempName,name,name,tempName,errorMgntMemBis)
def specificPostProcessing(self, detachThread):
""" Called when we are returning a string or an array of string """
# We are doing an exception check here JUST in this case because
# in methodGiws::__createMethodBody we usually do it at the end
# of the method just after deleting the variable
# but when dealing with string, in this method, we are calling some
# methods which override the "exception engine" which drive the JNI
# engine crazy.
str=JNIFrameWork().getExceptionCheckProfile(detachThread)
str=str+"if (res != NULL) { "
if self.isArray():
strCommon=""
strDeclaration=""
if configGiws().getDisableReturnSize()==True:
strCommon+="int lenRow;"
else:
# The size of the array is returned as output argument of the function
strDeclaration="*"
strCommon+="""
%s lenRow = curEnv->GetArrayLength(res);
"""%(strDeclaration)
self.temporaryVariableName="arrayOfString"
if self.getDimensionArray() == 1:
str+=strCommon+"""
char **arrayOfString;
arrayOfString = new char *[%slenRow];
for (jsize i = 0; i < %slenRow; i++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(res, i));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
"""%(strDeclaration, strDeclaration)
return str
else:
if configGiws().getDisableReturnSize()==True:
str+="int lenCol;"
str+=strCommon+"""
char ***arrayOfString;
arrayOfString = new char **[%slenRow];
for (jsize i = 0; i < %slenRow; i++){ /* Line of the array */
jobjectArray resStringLine = reinterpret_cast<jobjectArray>(curEnv->GetObjectArrayElement(res, i));
%slenCol = curEnv->GetArrayLength(resStringLine);
arrayOfString[i]=new char*[%slenCol];
for (jsize j = 0; j < %slenCol; j++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(resStringLine, j));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i][j] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i][j], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
curEnv->DeleteLocalRef(resStringLine);
}
"""%(strDeclaration, strDeclaration, strDeclaration, strDeclaration, strDeclaration)
return str
else:
if hasattr(self,"parameterName"):
str+="""curEnv->DeleteLocalRef(%s);"""%(self.parameterName+"_")
str=str+"""
const char *tempString = curEnv->GetStringUTFChars(res, 0);
char * %s = new char[strlen(tempString) + 1];
strcpy(%s, tempString);
curEnv->ReleaseStringUTFChars(res, tempString);
curEnv->DeleteLocalRef(res);
"""%(self.temporaryVariableName, self.temporaryVariableName)
return str
def getReturnSyntax(self):
str=""
if self.isArray():
str = str + """
curEnv->DeleteLocalRef(res);
return arrayOfString;
"""
else:
str = str + """
return %s;
"""%(self.temporaryVariableName)
str = str + """ } else {
curEnv->DeleteLocalRef(res);
return NULL;
}"""
return str
| apache-2.0 | -4,025,532,918,523,767,000 | 39.48659 | 126 | 0.578972 | false |
ypid/series60-remote | pc/lib/log.py | 1 | 1490 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2009 Lukas Hetzenecker <LuHe@gmx.at>
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import logging
class QtStreamHandler(logging.Handler):
def __init__(self, parent, main):
logging.Handler.__init__(self)
self.parent = parent
self.main = main
self.textWidget = parent
self.formater = logging.Formatter("%(message)s")
def setFormatter(self, format):
self.formater = format
def createLock(self):
self.mutex = QMutex()
def acquire(self):
self.mutex.lock()
def release(self):
self.mutex.unlock()
def emit(self,record):
self.textWidget.appendPlainText(self.formater.format(record))
self.textWidget.moveCursor(QTextCursor.StartOfLine)
self.textWidget.ensureCursorVisible()
class QtOutput(object):
def __init__(self, parent, out=None, color=None):
self.textWidget = parent
self.out = out
self.color = color
def write(self, m):
self.textWidget.moveCursor(QTextCursor.End)
if self.color:
tc = self.textWidget.textColor()
self.textWidget.setTextColor(self.color)
self.textWidget.insertPlainText( m )
if self.color:
self.textWidget.setTextColor(tc)
if self.out:
if isinstance(m, unicode):
self.out.write(m.encode("utf8"))
else:
self.out.write(m)
| gpl-2.0 | 9,074,983,590,830,688,000 | 25.140351 | 69 | 0.606711 | false |
virt-who/virt-who | virtwho/manager/subscriptionmanager/subscriptionmanager.py | 1 | 16260 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
Module for communication with subscription-manager, part of virt-who
Copyright (C) 2011 Radek Novacek <rnovacek@redhat.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os
import json
from six.moves.http_client import BadStatusLine
from six import string_types
import rhsm.connection as rhsm_connection
import rhsm.certificate as rhsm_certificate
import rhsm.config as rhsm_config
from virtwho.config import NotSetSentinel
from virtwho.manager import Manager, ManagerError, ManagerFatalError, ManagerThrottleError
from virtwho.virt import AbstractVirtReport
from virtwho.util import generate_correlation_id
class SubscriptionManagerError(ManagerError):
pass
class SubscriptionManagerUnregisteredError(ManagerFatalError):
pass
# Mapping between strings returned from getJob and report statuses
STATE_MAPPING = {
'FINISHED': AbstractVirtReport.STATE_FINISHED,
'CANCELED': AbstractVirtReport.STATE_CANCELED,
'FAILED': AbstractVirtReport.STATE_FAILED,
'RUNNING': AbstractVirtReport.STATE_PROCESSING,
'WAITING': AbstractVirtReport.STATE_PROCESSING,
'CREATED': AbstractVirtReport.STATE_PROCESSING,
}
class NamedOptions(object):
"""
Object used for compatibility with RHSM
"""
pass
class SubscriptionManager(Manager):
sm_type = "sam"
""" Class for interacting subscription-manager. """
def __init__(self, logger, options):
self.logger = logger
self.options = options
self.cert_uuid = None
self.rhsm_config = None
self.cert_file = None
self.key_file = None
self.readConfig()
self.connection = None
self.correlation_id = generate_correlation_id()
def readConfig(self):
""" Parse rhsm.conf in order to obtain consumer
certificate and key paths. """
self.rhsm_config = rhsm_config.initConfig(
rhsm_config.DEFAULT_CONFIG_PATH)
consumer_cert_dir = self.rhsm_config.get("rhsm", "consumerCertDir")
cert = 'cert.pem'
key = 'key.pem'
self.cert_file = os.path.join(consumer_cert_dir, cert)
self.key_file = os.path.join(consumer_cert_dir, key)
def _check_owner_lib(self, kwargs, config):
"""
Try to check values of env and owner. These values has to be
equal to values obtained from Satellite server.
:param kwargs: dictionary possibly containing valid username and
password used for connection to rhsm
:param config: Configuration of virt-who
:return: None
"""
if config is None:
return
# Check 'owner' and 'env' only in situation, when these values
# are set and rhsm_username and rhsm_password are not set
if 'username' not in kwargs and 'password' not in kwargs and \
'owner' in config.keys() and 'env' in config.keys():
pass
else:
return
uuid = self.uuid()
consumer = self.connection.getConsumer(uuid)
if 'environment' in consumer:
environment = consumer['environment']
else:
return
if environment:
environment_name = environment['name']
owner = self.connection.getOwner(uuid)
owner_id = owner['key']
if config['owner'] != owner_id:
raise ManagerError(
"Cannot send data to: %s, because owner from configuration: %s is different" %
(owner_id, config['owner'])
)
if config['env'] != environment_name:
raise ManagerError(
"Cannot send data to: %s, because Satellite env: %s differs from configuration: %s" %
(owner_id, environment_name, config['env'])
)
def _connect(self, config=None):
""" Connect to the subscription-manager. """
kwargs = {
'host': self.rhsm_config.get('server', 'hostname'),
'ssl_port': int(self.rhsm_config.get('server', 'port')),
'handler': self.rhsm_config.get('server', 'prefix'),
'proxy_hostname': self.rhsm_config.get('server', 'proxy_hostname'),
'proxy_port': self.rhsm_config.get('server', 'proxy_port'),
'proxy_user': self.rhsm_config.get('server', 'proxy_user'),
'proxy_password': self.rhsm_config.get('server', 'proxy_password'),
'insecure': self.rhsm_config.get('server', 'insecure')
}
kwargs_to_config = {
'host': 'rhsm_hostname',
'ssl_port': 'rhsm_port',
'handler': 'rhsm_prefix',
'proxy_hostname': 'rhsm_proxy_hostname',
'proxy_port': 'rhsm_proxy_port',
'proxy_user': 'rhsm_proxy_user',
'proxy_password': 'rhsm_proxy_password',
'insecure': 'rhsm_insecure'
}
rhsm_username = None
rhsm_password = None
if config:
try:
rhsm_username = config['rhsm_username']
rhsm_password = config['rhsm_password']
except KeyError:
pass
if rhsm_username == NotSetSentinel:
rhsm_username = None
if rhsm_password == NotSetSentinel:
rhsm_password = None
# Testing for None is necessary, it might be an empty string
for key, value in kwargs.items():
try:
from_config = config[kwargs_to_config[key]]
if from_config is not NotSetSentinel and from_config is \
not None:
if key is 'ssl_port':
from_config = int(from_config)
kwargs[key] = from_config
except KeyError:
continue
if rhsm_username and rhsm_password:
self.logger.debug("Authenticating with RHSM username %s", rhsm_username)
kwargs['username'] = rhsm_username
kwargs['password'] = rhsm_password
else:
self.logger.debug("Authenticating with certificate: %s", self.cert_file)
if not os.access(self.cert_file, os.R_OK):
raise SubscriptionManagerUnregisteredError(
"Unable to read certificate, system is not registered or you are not root")
kwargs['cert_file'] = self.cert_file
kwargs['key_file'] = self.key_file
self.logger.info("X-Correlation-ID: %s", self.correlation_id)
if self.correlation_id:
kwargs['correlation_id'] = self.correlation_id
self.connection = rhsm_connection.UEPConnection(**kwargs)
try:
if not self.connection.ping()['result']:
raise SubscriptionManagerError(
"Unable to obtain status from server, UEPConnection is likely not usable."
)
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
self._check_owner_lib(kwargs, config)
return self.connection
def sendVirtGuests(self, report, options=None):
"""
Update consumer facts with info about virtual guests.
`guests` is a list of `Guest` instances (or it children).
"""
guests = report.guests
self._connect()
# Sort the list
guests.sort(key=lambda item: item.uuid)
serialized_guests = [guest.toDict() for guest in guests]
self.logger.info('Sending update in guests lists for config '
'"%s": %d guests found',
report.config.name, len(guests))
self.logger.debug("Domain info: %s", json.dumps(serialized_guests, indent=4))
# Send list of guest uuids to the server
try:
self.connection.updateConsumer(self.uuid(), guest_uuids=serialized_guests, hypervisor_id=report.hypervisor_id)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
report.state = AbstractVirtReport.STATE_FINISHED
def hypervisorCheckIn(self, report, options=None):
""" Send hosts to guests mapping to subscription manager. """
connection = self._connect(report.config)
is_async = self._is_rhsm_server_async(report, connection)
serialized_mapping = self._hypervisor_mapping(report, is_async, connection)
self.logger.debug("Host-to-guest mapping being sent to '{owner}': {mapping}".format(
owner=report.config['owner'],
mapping=json.dumps(serialized_mapping, indent=4)))
# All subclasses of ConfigSection use dictionary like notation,
# but RHSM uses attribute like notation
if options:
named_options = NamedOptions()
for key, value in options['global'].items():
setattr(named_options, key, value)
else:
named_options = None
try:
try:
result = self.connection.hypervisorCheckIn(
report.config['owner'],
report.config['env'],
serialized_mapping,
options=named_options) # pylint:disable=unexpected-keyword-arg
except TypeError:
# This is temporary workaround until the options parameter gets implemented
# in python-rhsm
self.logger.debug(
"hypervisorCheckIn method in python-rhsm doesn't understand options parameter, ignoring"
)
result = self.connection.hypervisorCheckIn(report.config['owner'], report.config['env'], serialized_mapping)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
if is_async is True:
report.state = AbstractVirtReport.STATE_CREATED
report.job_id = result['id']
else:
report.state = AbstractVirtReport.STATE_FINISHED
return result
def _is_rhsm_server_async(self, report, connection=None):
"""
Check if server has capability 'hypervisor_async'.
"""
if connection is None:
self._connect(report.config)
self.logger.debug("Checking if server has capability 'hypervisor_async'")
is_async = hasattr(self.connection, 'has_capability') and self.connection.has_capability('hypervisors_async')
if is_async:
self.logger.debug("Server has capability 'hypervisors_async'")
else:
self.logger.debug("Server does not have 'hypervisors_async' capability")
return is_async
def _hypervisor_mapping(self, report, is_async, connection=None):
"""
Return mapping of hypervisor
"""
if connection is None:
self._connect(report.config)
mapping = report.association
serialized_mapping = {}
ids_seen = []
if is_async:
hosts = []
# Transform the mapping into the async version
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
hosts.append(hypervisor.toDict())
ids_seen.append(hypervisor.hypervisorId)
serialized_mapping = {'hypervisors': hosts}
else:
# Reformat the data from the mapping to make it fit with
# the old api.
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
guests = [g.toDict() for g in hypervisor.guestIds]
serialized_mapping[hypervisor.hypervisorId] = guests
ids_seen.append(hypervisor.hypervisorId)
return serialized_mapping
def check_report_state(self, report):
# BZ 1554228
job_id = str(report.job_id)
self._connect(report.config)
self.logger.debug('Checking status of job %s', job_id)
try:
result = self.connection.getJob(job_id)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
state = STATE_MAPPING.get(result['state'], AbstractVirtReport.STATE_FAILED)
report.state = state
if state not in (AbstractVirtReport.STATE_FINISHED,
AbstractVirtReport.STATE_CANCELED,
AbstractVirtReport.STATE_FAILED):
self.logger.debug('Job %s not finished', job_id)
else:
# log completed job status
result_data = result.get('resultData', {})
if not result_data:
self.logger.warning("Job status report without resultData: %s", result)
return
if isinstance(result_data, string_types):
self.logger.warning("Job status report encountered the following error: %s", result_data)
return
for fail in result_data.get('failedUpdate', []):
self.logger.error("Error during update list of guests: %s", str(fail))
self.logger.debug("Number of mappings unchanged: %d", len(result_data.get('unchanged', [])))
self.logger.info("Mapping for config \"%s\" updated", report.config.name)
def uuid(self):
""" Read consumer certificate and get consumer UUID from it. """
if not self.cert_uuid:
try:
certificate = rhsm_certificate.create_from_file(self.cert_file)
self.cert_uuid = certificate.subject["CN"]
except Exception as e:
raise SubscriptionManagerError("Unable to open certificate %s (%s):" % (self.cert_file, str(e)))
return self.cert_uuid
| gpl-2.0 | 2,120,471,579,209,106,700 | 40.692308 | 124 | 0.610701 | false |
naiquevin/jinger | jinger/test/test_site.py | 1 | 1107 | # import unittest
import os
from jinger.site import create_empty_site, createdir
from jinger.test import DIR_PLAYGROUND, JingerPlaygroundTest
class SiteTest(JingerPlaygroundTest):
def test_create_dir(self):
mysite = createdir(DIR_PLAYGROUND, 'mysite')
self.assertTrue(os.path.exists(mysite))
# check that if the dir already exists, it raises an Exception
pass
def test_create_empty_site(self):
create_empty_site('mysite', DIR_PLAYGROUND)
newsite = os.path.join(DIR_PLAYGROUND, 'mysite')
os.path.exists(newsite)
os.path.exists(os.path.join(newsite, 'templates'))
os.path.exists(os.path.join(newsite, 'public'))
os.path.exists(os.path.join(newsite, 'config.json'))
create_empty_site('myothersite', DIR_PLAYGROUND, '_source', 'www')
newsite = os.path.join(DIR_PLAYGROUND, 'myothersite')
os.path.exists(newsite)
os.path.exists(os.path.join(newsite, '_source'))
os.path.exists(os.path.join(newsite, 'www'))
os.path.exists(os.path.join(newsite, 'config.json'))
| mit | 2,501,200,863,623,185,000 | 34.709677 | 74 | 0.661247 | false |
kata198/usrsvc | usrsvcmod/Monitoring/ActivityFile.py | 1 | 3670 | '''
Copyright (c) 2016 Tim Savannah All Rights Reserved.
This software is licensed under the terms of the GPLv3.
This may change at my discretion, retroactively, and without notice.
You should have received a copy of this with the source distribution as a file titled, LICENSE.
The most current license can be found at:
https://github.com/kata198/usrsvc/LICENSE
This location may need to be changed at some point in the future, in which case
you are may email Tim Savannah <kata198 at gmail dot com>, or find them on the
current website intended for distribution of usrsvc.
ActivityFileMonitor - Asserts that a specific file or directory should be modified within a certain threshold
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
import os
import time
from func_timeout import FunctionTimedOut
from . import MonitoringBase
from ..logging import logMsg, logErr
# TODO: We need to implement the check here as launching and joining on a thread, so that we don't lockup all monitoring if someone
# uses an NFS file on a disconnected device or anything else that will result in an indefinite uninterruptable ("D") state.
class ActivityFileMonitor(MonitoringBase):
'''
ActivityFileMonitor - Class for doing activity file monitoring
'''
def __init__(self, programName, activityFile, activityFileLimit):
MonitoringBase.__init__(self)
self.programName = programName
self.activityFile = activityFile
self.activityFileLimit = activityFileLimit
@classmethod
def createFromConfig(cls, programConfig):
if not programConfig.Monitoring.activityfile:
return None
return cls(programConfig.name, programConfig.Monitoring.activityfile, programConfig.Monitoring.activityfile_limit)
def shouldRestart(self, program=None):
'''
Returns True if activity file has not been modified within the threshold specified by activityfile_limit (should restart), otherwise False.
@param program - unused.
'''
activityFile = self.activityFile
activityFileLimit = self.activityFileLimit
programName = self.programName
if not activityFile:
# Yes this is checked twice if created through createFromConfig, but it may be called otherwise so better safe.
return False
try:
# If activity file is not present, this is a fail and we restart.
if not os.path.exists(activityFile):
self.setReason('Restarting %s because activity file ( %s ) does not exist\n' %(programName, activityFile,))
return True
# Gather the mtime and see if we are past the threshold
lastModified = os.stat(activityFile).st_mtime
now = time.time()
threshold = float(now - self.activityFileLimit)
if lastModified < threshold:
self.setReason('Restarting %s because it has not modified activity file ( %s ) in %.4f seconds. Limit is %d seconds.\n' %(programName, activityFile, float(now - lastModified), activityFileLimit) )
return True
except FunctionTimedOut:
logErr('MONITOR: ActivityFile timed out on %s\n' %(programName,))
raise
except Exception as e:
# If we got an exception, just log and try again next round.
logErr('Got an exception in activity file monitoring. Not restarting program. Program="%s" activityfile="%s"\nlocals: %s\n' %(programName, activityFile, str(locals())))
return False
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
| lgpl-2.1 | 5,772,787,185,983,751,000 | 40.704545 | 212 | 0.687193 | false |
stefanoteso/musm-adt17 | musm/pc.py | 1 | 4018 | import numpy as np
import gurobipy as gurobi
from .problem import Problem
class PC(Problem):
_ATTRIBUTES = [
('cpu', 37),
('hd', 10),
('manufacturer', 8),
('ram', 10),
('monitor', 8),
('pctype', 3),
]
_ATTR_TO_COSTS = {
'pctype': [50, 0, 80],
'manufacturer': [100, 0, 100, 50, 0, 0, 50, 50],
'cpu' : [
1.4*100, 1.4*130, 1.1*70, 1.1*90, 1.2*80, 1.2*50, 1.2*60, 1.2*80,
1.2*90, 1.2*100, 1.2*110, 1.2*120, 1.2*130, 1.2*140, 1.2*170,
1.5*50, 1.5*60, 1.5*80, 1.5*90, 1.5*100, 1.5*110, 1.5*130, 1.5*150,
1.5*160, 1.5*170, 1.5*180, 1.5*220, 1.4*27, 1.4*30, 1.4*40, 1.4*45,
1.4*50, 1.4*55, 1.4*60, 1.4*70, 1.6*70, 1.6*73,
],
'monitor': [
0.6*100, 0.6*104, 0.6*120, 0.6*133, 0.6*140, 0.6*150, 0.6*170,
0.6*210
],
'ram': [
0.8*64, 0.8*128, 0.8*160, 0.8*192, 0.8*256, 0.8*320, 0.8*384,
0.8*512, 0.8*1024, 0.8*2048
],
'hd': [
4*8, 4*10, 4*12, 4*15, 4*20, 4*30, 4*40, 4*60, 4*80, 4*120
],
}
def __init__(self, **kwargs):
super().__init__(sum(attr[1] for attr in self._ATTRIBUTES))
self.cost_matrix = np.hstack([
np.array(self._ATTR_TO_COSTS[attr], dtype=float)
for attr, _ in self._ATTRIBUTES
]).reshape((1, -1)) / 2754.4
def _add_constraints(self, model, x):
base, offs = 0, {}
for attr, size in self._ATTRIBUTES:
offs[attr] = base
x_attr = [x[z] for z in range(base, base + size)]
model.addConstr(gurobi.quicksum(x_attr) == 1)
base += size
def implies(head, body):
# NOTE here we subtract 1 from head and body bits because the bit
# numbers in the constraints were computed starting from one, to
# work in MiniZinc, while Gurobi expects them to start from zero
head = 1 - x[head - 1]
body = gurobi.quicksum([x[i - 1] for i in body])
return model.addConstr(head + body >= 1)
# Manufacturer -> Type
implies(offs['manufacturer'] + 2, [offs['pctype'] + i for i in [1, 2]])
implies(offs['manufacturer'] + 4, [offs['pctype'] + 1])
implies(offs['manufacturer'] + 6, [offs['pctype'] + 2])
implies(offs['manufacturer'] + 7, [offs['pctype'] + i for i in [1, 3]])
# Manufacturer -> CPU
implies(offs['manufacturer'] + 1, [offs['cpu'] + i for i in range(28, 37+1)])
implies(offs['manufacturer'] + 2, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 7, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 4, [offs['cpu'] + i for i in range(5, 27+1)])
implies(offs['manufacturer'] + 3, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 5, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 8, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 6, [offs['cpu'] + i for i in range(16, 27+1)])
# Type -> RAM
implies(offs['pctype'] + 1, [offs['ram'] + i for i in range(1, 9+1)])
implies(offs['pctype'] + 2, [offs['ram'] + i for i in [2, 5, 8, 9]])
implies(offs['pctype'] + 3, [offs['ram'] + i for i in [5, 8, 9, 10]])
# Type -> HD
implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['hd'] + i for i in range(5, 10+1)])
implies(offs['pctype'] + 3, [offs['hd'] + i for i in range(5, 10+1)])
# Type -> Monitor
implies(offs['pctype'] + 1, [offs['monitor'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['monitor'] + i for i in range(6, 8+1)])
implies(offs['pctype'] + 3, [offs['monitor'] + i for i in range(6, 8+1)])
| mit | 4,937,620,061,646,593,000 | 43.153846 | 112 | 0.498507 | false |
hikelee/launcher | launcher/templatetags/helpers.py | 1 | 6201 | """
sentry.templatetags.sentry_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import os.path
from collections import namedtuple
from datetime import timedelta
import pytz
import six
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from six.moves import range
from six.moves.urllib.parse import quote
from launcher.utils.strings import soft_break as _soft_break,soft_hyphenate,to_unicode,truncatechars
SentryVersion=namedtuple('SentryVersion',[
'current',
'latest',
'update_available',
'build',
])
register=template.Library()
truncatechars=register.filter(stringfilter(truncatechars))
truncatechars.is_safe=True
@register.filter
def multiply(x,y):
def coerce(value):
if isinstance(value,(six.integer_types,float)):
return value
try:
return int(value)
except ValueError:
return float(value)
return coerce(x)*coerce(y)
@register.filter
def pprint(value,break_after=10):
"""
break_after is used to define how often a <span> is
inserted (for soft wrapping).
"""
value=to_unicode(value)
return mark_safe(
u'<span></span>'.
join([escape(value[i:(i+break_after)]) for i in range(0,len(value),break_after)])
)
@register.filter
def is_url(value):
if not isinstance(value,six.string_types):
return False
if not value.startswith(('http://','https://')):
return False
if ' ' in value:
return False
return True
# seriously Django?
@register.filter
def subtract(value,amount):
return int(value)-int(amount)
@register.filter
def absolute_value(value):
return abs(int(value) if isinstance(value,six.integer_types) else float(value))
@register.filter
def has_charts(group):
from launcher.utils.db import has_charts
if hasattr(group,'_state'):
db=group._state.db or 'default'
else:
db='default'
return has_charts(db)
@register.filter
def as_sorted(value):
return sorted(value)
@register.filter
def small_count(v,precision=1):
if not v:
return 0
z=[
(1000000000,_('b')),
(1000000,_('m')),
(1000,_('k')),
]
v=int(v)
for x,y in z:
o,p=divmod(v,x)
if o:
if len(six.text_type(o))>2 or not p:
return '%d%s'%(o,y)
return ('%.{}f%s'.format(precision))%(v/float(x),y)
return v
@register.filter
def num_digits(value):
return len(six.text_type(value))
@register.filter
def to_str(data):
return six.text_type(data)
@register.filter
def is_none(value):
return value is None
@register.filter
def timesince(value,now=None):
from django.template.defaultfilters import timesince
if now is None:
now=timezone.now()
if not value:
return _('never')
if value<(now-timedelta(days=5)):
return value.date()
value=(' '.join(timesince(value,now).split(' ')[0:2])).strip(',')
if value==_('0 minutes'):
return _('just now')
if value==_('1 day'):
return _('yesterday')
return value+_(' ago')
@register.filter
def duration(value):
if not value:
return '0s'
# value is assumed to be in ms
value=value/1000.0
hours,minutes,seconds=0,0,0
if value>3600:
hours=value/3600
value=value%3600
if value>60:
minutes=value/60
value=value%60
seconds=value
output=[]
if hours:
output.append('%dh'%hours)
if minutes:
output.append('%dm'%minutes)
if seconds>1:
output.append('%0.2fs'%seconds)
elif seconds:
output.append('%dms'%(seconds*1000))
return ''.join(output)
@register.filter
def date(dt,arg=None):
from django.template.defaultfilters import date
if not timezone.is_aware(dt):
dt=dt.replace(tzinfo=timezone.utc)
return date(dt,arg)
@register.filter
def trim_schema(value):
return value.split('//',1)[-1]
@register.filter
def with_metadata(group_list,request):
group_list=list(group_list)
if request.user.is_authenticated() and group_list:
project=group_list[0].project
bookmarks=set(
project.bookmark_set.filter(
user=request.user,
group__in=group_list,
).values_list('group_id',flat=True)
)
else:
bookmarks=set()
# TODO(dcramer): this is obsolete and needs to pull from the tsdb backend
historical_data={}
for g in group_list:
yield g,{
'is_bookmarked':g.pk in bookmarks,
'historical_data':','.join(six.text_type(x[1]) for x in historical_data.get(g.id,[])),
}
@register.simple_tag
def percent(value,total,format=None):
if not (value and total):
result=0
else:
result=int(value)/float(total)*100
if format is None:
return int(result)
else:
return ('%%%s'%format)%result
@register.filter
def titlize(value):
return value.replace('_',' ').title()
@register.filter
def split(value,delim=''):
return value.split(delim)
@register.inclusion_tag('sentry/partial/github_button.html')
def github_button(user,repo):
return {
'user':user,
'repo':repo,
}
@register.filter
def urlquote(value,safe=''):
return quote(value.encode('utf8'),safe)
@register.filter
def basename(value):
return os.path.basename(value)
@register.filter
def user_display_name(user):
return user.name or user.username
@register.simple_tag(takes_context=True)
def localized_datetime(context,dt,format='DATETIME_FORMAT'):
request=context['request']
timezone=getattr(request,'timezone',None)
if not timezone:
timezone=pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE)
dt=dt.astimezone(timezone)
return date(dt,format)
@register.filter
def format_userinfo(user):
parts=user.username.split('@')
if len(parts)==1:
username=user.username
else:
username=parts[0].lower()
return mark_safe('<span title="%s">%s</span>'%(escape(user.username),escape(username),))
@register.filter
def soft_break(value,length):
return _soft_break(
value,
length,
functools.partial(soft_hyphenate,length=max(length//10,10)),
)
| mit | 335,523,952,679,000,960 | 22.13806 | 100 | 0.687631 | false |
vigneshkarthi/satireguru | satire-bot.py | 1 | 3178 | import twitter
import yaml
import time
import pickle
import re
global match, api, msg, oldID
import random
msg = ''
#RegEx for parsing twitter handle from retrieved mentions
keyword = '';
#UTF_CHARS = ur'a-z0-9_\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u00ff'
#TAG_EXP = ur'(^|[^0-9A-Z&/]+)(#|\uff03)([0-9A-Z_]*[A-Z_]+[%s]*)' % UTF_CHARS
#TAG_REGEX = re.compile(TAG_EXP, re.UNICODE | re.IGNORECASE)
#Performs OAuth authentication, place all the neccessary keys in access.yaml
def authenticate():
global api
data = yaml.load(open("access.yaml"))
api = twitter.Api(consumer_key=data['consumer-key'],consumer_secret=data['consumer-secret'],access_token_key=data['access-key'],access_token_secret=data['access-secret'])
#Parses response.yaml to search and reply with relevant messages according to twitter handles; fill in your responses in response.yaml
def choose_reply():
global match, msg
comments = yaml.load(open("response.yaml"))
for name in comments['name']:
if(name['keyword']==match):
msg = random.choice(name['response'])
#Module which checks for mentions and replies to the mentioner and the person mentioned
#current version supports only one mentioned person
def get_and_post_replies(old):
cache_msg_to_post = ' '
global match, api
while(1):
try:
i = 0
repl = api.GetMentions()
total = len(repl)
newID = int(repl[i].id)
while(newID != old):
print repl[i].text+", by @"+repl[i].user.screen_name
if "pm" in repl[i].text.lower():
match = 'PM'
print "Match is", match
choose_reply()
msg_to_post = "@"+repl[i].user.screen_name+" "+msg
if(msg_to_post == cache_msg_to_post):
                        msg_to_post = msg_to_post + " " + str(random.randint(0, 1000))  # str() is required: str + int raises TypeError
cache_msg_to_post = msg_to_post
try:
api.PostUpdate(msg_to_post, in_reply_to_status_id=repl[i].id)
print "Msg posted is", msg_to_post
i = i+1
if (total == i):
break
newID = int(repl[i].id)
except twitter.TwitterError:
print "Something happend.. Saving ID's to file.. Not to Worry"
fileObj = open("idstore",'r+')
old = repl[0].id
fileObj.seek(0)
fileObj.write(str(old))
fileObj.close()
return
else:
i = i + 1
if (total == i):
break
                    newID = int(repl[i].id)  # fixed case: 'newId' never updated the loop condition variable
old = int(repl[0].id)
print "No New Tweets !!"
print "Gonna sleep for a minute :)"
time.sleep(60)
except KeyboardInterrupt:
fileObj = open("idstore", 'r+')
fileObj.seek(0)
fileObj.write(str(old))
print "Saving ID's to file.. Exiting!!"
return
authenticate()
fileObj = open("idstore",'r+')
old = fileObj.read()
old = int(old)
get_and_post_replies(old)
| gpl-2.0 | -6,554,156,474,092,308,000 | 35.113636 | 174 | 0.538704 | false |
CLVsol/oehealth | oehealth_dispensation/oehealth_dispensation.py | 1 | 9325 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
import time
class oehealth_dispensation(osv.Model):
_name='oehealth.dispensation'
def _compute_create_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_uid = perms[0].get('create_uid', 'n/a')
result[r.id] = create_uid
return result
def _compute_create_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
create_date = perms[0].get('create_date', 'n/a')
result[r.id] = create_date
return result
def _compute_write_uid(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_uid = perms[0].get('write_uid', 'n/a')
result[r.id] = write_uid
return result
def _compute_write_date(self, cr, uid, ids, field_name, arg, context={}):
result = {}
for r in self.browse(cr, uid, ids, context=context):
perms = self.perm_read(cr, uid, ids)
write_date = perms[0].get('write_date', 'n/a')
result[r.id] = write_date
return result
_columns={
'name': fields.char(size=256, string='Dispensation ID', required=True,
help='Type in the ID of this dispensation'),
'dispensation_date': fields.date(string='Dispensation Date', required=True),
'prescription_date': fields.date(string='Prescription Date', required=True),
'prescriber_id': fields.many2one('oehealth.prescriber', string='Prescriber', required=True),
#'patient_id': fields.many2one('oehealth.patient', string='Patient', required=True),
#'pregnancy_warning': fields.boolean(string='Pregancy Warning', readonly=True),
'notes': fields.text(string='Prescription Notes'),
#'prescription_line': fields.one2many('oehealth.dispensation.line',
# 'pbm_prescription_order_id',
# string='Dispensation line',),
'prescription_line': fields.one2many('oehealth.medicament.template',
'dispensation_id',
string='Prescription lines',),
#'pbm_prescription_warning_ack': fields.boolean(string='Dispensation verified'),
#'user_id': fields.many2one('res.users', string='Prescribing Doctor', required=True),
'active': fields.boolean('Active', help="The active field allows you to hide the dispensation without removing it."),
'state': fields.selection([('new','New'),
('revised','Revised'),
('waiting','Waiting'),
('okay','Okay')], 'Stage', readonly=True),
'create_uid': fields.function(_compute_create_uid, method=True, type='char', string='Create User',),
'create_date': fields.function(_compute_create_date, method=True, type='datetime', string='Create Date',),
'write_uid': fields.function(_compute_write_uid, method=True, type='char', string='Write User',),
'write_date': fields.function(_compute_write_date, method=True, type='datetime', string='Write Date',),
}
_sql_constraints = [
('uniq_name', 'unique(name)', "The Dispensation ID must be unique!"),
]
_defaults={
'name': '/',
'dispensation_date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'active': 1,
'state': 'new',
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not 'name' in vals or vals['name'] == '/':
val = self.pool.get('ir.sequence').get(cr, uid, 'oehealth.dispensation.code')
code = map(int, str(val))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
vals['name'] = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
vals['name'] = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
vals['name'] = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
vals['name'] = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
vals['name'] = code_str[14 - code_len:21]
return super(oehealth_dispensation, self).create(cr, uid, vals, context)
def oehealth_dispensation_new(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'new'})
return True
def oehealth_dispensation_revised(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'revised'})
return True
def oehealth_dispensation_waiting(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'waiting'})
return True
def oehealth_dispensation_okay(self, cr, uid, ids):
self.write(cr, uid, ids, {'state': 'okay'})
return True
def get_authorization(self, cr, uid, ids, context={}):
data=ids
test_request_obj = self.pool.get('oehealth.dispensation')
lab_obj = self.pool.get('oehealth.dispensation')
test_report_data={}
test_cases = []
test_obj = test_request_obj.browse(cr, uid, context.get('active_id'), context=context)
#if test_obj.state == 'tested':
if test_obj.state != 'tested':
#raise osv.except_osv(_('UserError'),_('Test Report already created.'))
raise osv.except_osv(('UserError'),('Test Report already created.'))
test_report_data['test'] = test_obj.name.id
test_report_data['patient'] = test_obj.patient_id.id
#test_report_data['requestor'] = test_obj.doctor_id.id
test_report_data['date_requested'] = test_obj.date
for criterion in test_obj.name.criteria:
test_cases.append((0,0,{'name':criterion.name,
'sequence':criterion.sequence,
'normal_range':criterion.normal_range,
'unit':criterion.unit.id,
}))
test_report_data['criteria'] = test_cases
lab_id = lab_obj.create(cr,uid,test_report_data,context=context)
test_request_obj.write(cr, uid, context.get('active_id'), {'state':'tested'})
return {
'domain': "[('id','=', "+str(lab_id)+")]",
'name': 'Lab Test Report',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'oehealth.lab_test',
'type': 'ir.actions.act_window'
}
oehealth_dispensation()
| agpl-3.0 | -279,910,869,561,610,850 | 49.405405 | 125 | 0.500268 | false |
shadowk29/cusumtools | legacy/minimal_psd.py | 1 | 12009 | ## COPYRIGHT
## Copyright (C) 2015 Kyle Briggs (kbrig035<at>uottawa.ca)
##
## This file is part of cusumtools.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import tkinter.filedialog
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import scipy.io as sio
from scipy.signal import bessel, filtfilt, welch
from scikits.samplerate import resample
import pylab as pl
import glob
import os
import time
import pandas as pd
from pandasql import sqldf
import re
def make_format(current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
coords = [ax_coord, (x, y)]
return ('Left: {:<40} Right: {:<}'
.format(*['({:.3f}, {:.3f})'.format(x, y) for x,y in coords]))
return format_coord
class App(tk.Frame):
def __init__(self, parent,file_path):
tk.Frame.__init__(self, parent)
parent.deiconify()
self.events_flag = False
self.baseline_flag = False
self.file_path = file_path
##### Trace plotting widgets #####
self.trace_frame = tk.LabelFrame(parent,text='Current Trace')
self.trace_fig = Figure(figsize=(7,5), dpi=100)
self.trace_canvas = FigureCanvasTkAgg(self.trace_fig, master=self.trace_frame)
self.trace_toolbar_frame = tk.Frame(self.trace_frame)
self.trace_toolbar = NavigationToolbar2TkAgg(self.trace_canvas, self.trace_toolbar_frame)
self.trace_toolbar.update()
self.trace_frame.grid(row=0,column=0,columnspan=6,sticky=tk.N+tk.S)
self.trace_toolbar_frame.grid(row=1,column=0,columnspan=6)
self.trace_canvas.get_tk_widget().grid(row=0,column=0,columnspan=6)
##### PSD plotting widgets #####
self.psd_frame = tk.LabelFrame(parent,text='Power Spectrum')
self.psd_fig = Figure(figsize=(7,5), dpi=100)
self.psd_canvas = FigureCanvasTkAgg(self.psd_fig, master=self.psd_frame)
self.psd_toolbar_frame = tk.Frame(self.psd_frame)
self.psd_toolbar = NavigationToolbar2TkAgg(self.psd_canvas, self.psd_toolbar_frame)
self.psd_toolbar.update()
self.psd_frame.grid(row=0,column=6,columnspan=6,sticky=tk.N+tk.S)
self.psd_toolbar_frame.grid(row=1,column=6,columnspan=6)
self.psd_canvas.get_tk_widget().grid(row=0,column=6,columnspan=6)
##### Control widgets #####
self.control_frame = tk.LabelFrame(parent, text='Controls')
self.control_frame.grid(row=2,column=0,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.start_entry = tk.Entry(self.control_frame)
self.start_entry.insert(0,'0')
self.start_label = tk.Label(self.control_frame, text='Start Time (s)')
self.start_label.grid(row=0,column=0,sticky=tk.E+tk.W)
self.start_entry.grid(row=0,column=1,sticky=tk.E+tk.W)
self.end_entry = tk.Entry(self.control_frame)
self.end_entry.insert(0,'10')
self.end_label = tk.Label(self.control_frame, text='End Time (s)')
self.end_label.grid(row=0,column=2,sticky=tk.E+tk.W)
self.end_entry.grid(row=0,column=3,sticky=tk.E+tk.W)
self.cutoff_entry = tk.Entry(self.control_frame)
self.cutoff_entry.insert(0,'')
self.cutoff_label = tk.Label(self.control_frame, text='Cutoff (Hz)')
self.cutoff_label.grid(row=1,column=0,sticky=tk.E+tk.W)
self.cutoff_entry.grid(row=1,column=1,sticky=tk.E+tk.W)
self.order_entry = tk.Entry(self.control_frame)
self.order_entry.insert(0,'')
self.order_label = tk.Label(self.control_frame, text='Filter Order')
self.order_label.grid(row=1,column=2,sticky=tk.E+tk.W)
self.order_entry.grid(row=1,column=3,sticky=tk.E+tk.W)
self.samplerate_entry = tk.Entry(self.control_frame)
self.samplerate_entry.insert(0,'250000')
self.samplerate_label = tk.Label(self.control_frame, text='Sampling Frequency (Hz)')
self.samplerate_label.grid(row=1,column=4,sticky=tk.E+tk.W)
self.samplerate_entry.grid(row=1,column=5,sticky=tk.E+tk.W)
self.savegain_entry = tk.Entry(self.control_frame)
self.savegain_entry.insert(0,'1')
        self.savegain_label = tk.Label(self.control_frame, text='Save Gain')  # was mislabelled 'Sampling Frequency (Hz)' by copy-paste
self.savegain_label.grid(row=0,column=4,sticky=tk.E+tk.W)
self.savegain_entry.grid(row=0,column=5,sticky=tk.E+tk.W)
self.plot_trace = tk.Button(self.control_frame, text='Update Trace', command=self.update_trace)
self.plot_trace.grid(row=2,column=0,columnspan=2,sticky=tk.E+tk.W)
self.normalize = tk.IntVar()
self.normalize.set(0)
self.normalize_check = tk.Checkbutton(self.control_frame, text='Normalize', variable = self.normalize)
self.normalize_check.grid(row=2,column=2,sticky=tk.E+tk.W)
self.plot_psd = tk.Button(self.control_frame, text='Update PSD', command=self.update_psd)
self.plot_psd.grid(row=2,column=3,sticky=tk.E+tk.W)
##### Feedback Widgets #####
self.feedback_frame = tk.LabelFrame(parent, text='Status')
self.feedback_frame.grid(row=2,column=6,columnspan=6,sticky=tk.N+tk.S+tk.E+tk.W)
self.export_psd = tk.Button(self.feedback_frame, text='Export PSD',command=self.export_psd)
self.export_psd.grid(row=1,column=0,columnspan=6,sticky=tk.E+tk.W)
self.export_trace = tk.Button(self.feedback_frame, text='Export Trace',command=self.export_trace)
self.export_trace.grid(row=2,column=0,columnspan=6,sticky=tk.E+tk.W)
self.load_memmap()
self.initialize_samplerate()
def export_psd(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\PSDs for Sam')
np.savetxt(data_path,np.c_[self.f, self.Pxx, self.rms],delimiter=',')
except AttributeError:
self.wildcard.set('Plot the PSD first')
def export_trace(self):
try:
data_path = tkinter.filedialog.asksaveasfilename(defaultextension='.csv',initialdir='G:\Analysis\Pores\NPN\PSDs')
np.savetxt(data_path,self.plot_data,delimiter=',')
except AttributeError:
self.wildcard.set('Plot the trace first')
def load_mapped_data(self):
self.total_samples = len(self.map)
self.samplerate = int(self.samplerate_entry.get())
if self.start_entry.get()!='':
self.start_time = float(self.start_entry.get())
start_index = int((float(self.start_entry.get())*self.samplerate))
else:
self.start_time = 0
start_index = 0
if self.end_entry.get()!='':
self.end_time = float(self.end_entry.get())
end_index = int((float(self.end_entry.get())*self.samplerate))
if end_index > self.total_samples:
end_index = self.total_samples
self.data = self.map[start_index:end_index]
self.data = float(self.savegain_entry.get()) * self.data
def load_memmap(self):
columntypes = np.dtype([('current', '>i2'), ('voltage', '>i2')])
self.map = np.memmap(self.file_path, dtype=columntypes, mode='r')['current']
def integrate_noise(self, f, Pxx):
df = f[1]-f[0]
return np.sqrt(np.cumsum(Pxx * df))
def filter_data(self):
cutoff = float(self.cutoff_entry.get())
order = int(self.order_entry.get())
Wn = 2.0 * cutoff/float(self.samplerate)
b, a = bessel(order,Wn,'low')
padding = 1000
padded = np.pad(self.data, pad_width=padding, mode='median')
self.filtered_data = filtfilt(b, a, padded, padtype=None)[padding:-padding]
def initialize_samplerate(self):
self.samplerate = float(self.samplerate_entry.get())
##### Plot Updating functions #####
def update_trace(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
self.trace_fig.clf()
a = self.trace_fig.add_subplot(111)
time = np.linspace(1.0/self.samplerate,len(self.plot_data)/float(self.samplerate),len(self.plot_data))+self.start_time
a.set_xlabel(r'Time ($\mu s$)')
a.set_ylabel('Current (pA)')
self.trace_fig.subplots_adjust(bottom=0.14,left=0.21)
a.plot(time*1e6,self.plot_data,'.',markersize=1)
self.trace_canvas.show()
def update_psd(self):
self.initialize_samplerate()
self.load_mapped_data()
self.filtered_data = self.data
self.plot_data = self.filtered_data
plot_samplerate = self.samplerate
if self.cutoff_entry.get()!='' and self.order_entry!='':
self.filter_data()
self.plot_data = self.filtered_data
maxf = 2*float(self.cutoff_entry.get())
else:
maxf = 2*float(self.samplerate_entry.get())
length = np.minimum(2**18,len(self.filtered_data))
end_index = int(np.floor(len(self.filtered_data)/length)*length)
current = np.average(self.filtered_data[:end_index])
f, Pxx = welch(self.filtered_data, plot_samplerate,nperseg=length)
self.rms = self.integrate_noise(f, Pxx)
if self.normalize.get():
Pxx /= current**2
Pxx *= maxf/2.0
self.rms /= np.absolute(current)
self.f = f
self.Pxx = Pxx
minf = 1
BW_index = np.searchsorted(f, maxf/2)
logPxx = np.log10(Pxx[1:BW_index])
minP = 10**np.floor(np.amin(logPxx))
maxP = 10**np.ceil(np.amax(logPxx))
self.psd_fig.clf()
a = self.psd_fig.add_subplot(111)
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(r'Spectral Power ($\mathrm{pA}^2/\mathrm{Hz}$)')
a.set_xlim(minf, maxf)
a.set_ylim(minP, maxP)
self.psd_fig.subplots_adjust(bottom=0.14,left=0.21)
a.loglog(f[1:],Pxx[1:],'b-')
for tick in a.get_yticklabels():
tick.set_color('b')
a2 = a.twinx()
a2.semilogx(f, self.rms, 'r-')
a2.set_ylabel('RMS Noise (pA)')
a2.set_xlim(minf, maxf)
for tick in a2.get_yticklabels():
tick.set_color('r')
a2.format_coord = make_format(a2, a)
self.psd_canvas.show()
def main():
root=tk.Tk()
root.withdraw()
file_path = tkinter.filedialog.askopenfilename(initialdir='C:/Data/')
App(root,file_path).grid(row=0,column=0)
root.mainloop()
if __name__=="__main__":
main()
| gpl-3.0 | 2,097,180,032,333,189,600 | 38.503289 | 126 | 0.615955 | false |
syhpoon/xyzcmd | libxyz/vfs/vfsobj.py | 1 | 8497 | #-*- coding: utf8 -*
#
# Max E. Kuznecov ~syhpoon <syhpoon@syhpoon.name> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
import os
from libxyz.core.utils import bstring, ustring
from libxyz.vfs import types, util
class VFSObject(object):
"""
Abstract interface for VFS objects
"""
def __init__(self, xyz, path, full_path, ext_path, driver, parent,
enc=None, **kwargs):
self.xyz = xyz
self.enc = enc or xyzenc
# Internal VFS path
self.path = bstring(path, self.enc)
# Full VFS path
self.full_path = bstring(full_path, self.enc)
# External VFS path
self.ext_path = bstring(ext_path, self.enc)
self.parent = parent
self.driver = driver
self.kwargs = kwargs
self.fileobj = None
# File name
self.name = os.path.basename(self.path)
# File type
self.ftype = None
# Access time
self.atime = None
# Modified time
self.mtime = None
# Changed time
self.ctime = None
# Size in bytes
self.size = None
# Owner UID
self.uid = None
# Group
self.gid = None
# Mode
self.mode = None
# Inode
self.inode = None
# Visual file type
self.vtype = None
# Visual file representation
self.visual = None
# File info
self.info = None
# Any type-specific data
self.data = None
# List of significant attributes
self.attributes = ()
self.__ni_msg = _(u"Feature not implemented")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_file(self):
"""
Return True if instance is representing regular file
"""
return isinstance(self.ftype, types.VFSTypeFile)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir(self):
"""
Return True if instance is representing directory
"""
return isinstance(self.ftype, types.VFSTypeDir)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_dir_empty(self):
"""
Return True if instance is representing directory and it is empty
"""
if not self.is_dir():
return False
_, _, objs = self.walk()
return len(objs) == 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_link(self):
"""
Return True if instance is representing soft link
"""
return isinstance(self.ftype, types.VFSTypeLink)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_char(self):
"""
Return True if instance is representing soft char device
"""
return isinstance(self.ftype, types.VFSTypeChar)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_block(self):
"""
Return True if instance is representing block device
"""
return isinstance(self.ftype, types.VFSTypeBlock)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_fifo(self):
"""
Return True if instance is representing FIFO
"""
return isinstance(self.ftype, types.VFSTypeFifo)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_socket(self):
"""
Return True if instance is representing socket
"""
return isinstance(self.ftype, types.VFSTypeSocket)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def copy(self, path, existcb=None, errorcb=None,
save_attrs=True, follow_links=False, cancel=None):
"""
Copy file to specified location
@param path: Local path to copy file to
@param existcb: Callback function to be called if there exists
an object in target directory with the same name.
Callback function receives VFSObject instance as an
argument and must return one of:
'override' - to override this very object
'override all' - to override any future collisions
'skip' - to skip the object
'skip all' - to skip all future collisions
'abort' - to abort the process.
If no existscb provided 'abort' is used as default
@param errorcb: Callback function to be called in case an error occured
during copying. Function receives VFSObject instance
and error string as arguments and must return one of:
'skip' - to continue the process
'skip all' - to skip all future errors
'abort' - to abort the process.
If no errorcb provided 'abort' is used as default
@param save_attrs: Whether to save object attributes
@param follow_links: Whether to follow symlinks
@param cancel: a threading.Event instance, if it is found set - abort
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def move(self, path, existcb=None, errorcb=None, save_attrs=True,
follow_links=False, cancel=None):
"""
Move object
Arguments are the same as for copy()
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def mkdir(self, newdir):
"""
Create new dir inside object (only valid for directory object types)
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def remove(self, recursive=True):
"""
[Recursively] remove object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def walk(self):
"""
Directory tree walker
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open(self, mode='r'):
"""
Open self object in provided mode
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def close(self):
"""
Close self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def read(self, bytes=None):
"""
Read bytes from self object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tell(self):
"""
Tell file position
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seek(self, offset, whence=None):
"""
Perform seek() on object
"""
raise NotImplementedError(self.__ni_msg)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def in_dir(self, d, e):
"""
Filter only those archive entries which exist in the same
directory level
"""
if e.startswith(d.lstrip(os.sep)) and \
len(util.split_path(e)) == (len(util.split_path(d)) + 1):
return True
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __repr__(self):
return self.__str__()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __unicode__(self):
return ustring(self.__str__())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __del__(self):
if self.fileobj:
try:
self.close()
except Exception:
pass
| gpl-3.0 | -442,618,524,861,205,300 | 26.146965 | 79 | 0.485348 | false |
317070/kaggle-heart | ira/configurations/gauss_roi10_maxout.py | 1 | 9185 | from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import utils_heart
import nn_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import utils
import data
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
patch_size = (64, 64)
train_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1 / 1.5, 1.5),
'do_flip': True,
'sequence_shift': False
}
valid_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
}
test_transformation_params = {
'patch_size': patch_size,
'mm_patch_size': (128, 128),
'mask_roi': True,
'rotation_range': (-180, 180),
'translation_range_x': (-5, 10),
'translation_range_y': (-10, 10),
'shear_range': (0, 0),
'roi_scale_range': (0.95, 1.3),
'zoom_range': (1., 1.),
'do_flip': True,
'sequence_shift': False
}
data_prep_fun = data.transform_norm_rescale_after
batch_size = 32
nbatches_chunk = 16
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=train_transformation_params,
patient_ids=train_valid_ids['train'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=True, random=True, infinite=True,
data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
batch_size=chunk_size,
transform_params=valid_transformation_params,
patient_ids=train_valid_ids['valid'],
labels_path=TRAIN_LABELS_PATH,
slice2roi_path='pkl_train_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
batch_size=chunk_size,
transform_params=test_transformation_params,
slice2roi_path='pkl_validate_slice2roi_10.pkl',
full_batch=False, random=False, infinite=False,
data_prep_fun=data_prep_fun)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 150
learning_rate_schedule = {
0: 0.0002,
int(max_nchunks * 0.1): 0.0001,
int(max_nchunks * 0.3): 0.000075,
int(max_nchunks * 0.6): 0.00005,
int(max_nchunks * 0.9): 0.00001
}
validate_every = 2 * nchunks_per_epoch
save_every = 2 * nchunks_per_epoch
conv3 = partial(Conv2DDNNLayer,
stride=(1, 1),
pad="same",
filter_size=(3, 3),
nonlinearity=nn.nonlinearities.very_leaky_rectify,
b=nn.init.Constant(0.1),
W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
pool_size=(2, 2),
stride=(2, 2))
def build_model(l_in=None):
l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
l = conv3(l_in, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d01 = nn.layers.FeaturePoolLayer(l_d01, pool_size=2)
l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d02 = nn.layers.FeaturePoolLayer(l_d02, pool_size=2)
mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(50), nonlinearity=nn_heart.lb_softplus())
sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0, sigma_logscale=False, mu_logscale=False)
# ---------------------------------------------------------------
l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d11 = nn.layers.FeaturePoolLayer(l_d11, pool_size=2)
l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11), num_units=1024, W=nn.init.Orthogonal("relu"),
b=nn.init.Constant(0.1), nonlinearity=None)
l_d12 = nn.layers.FeaturePoolLayer(l_d12, pool_size=2)
mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12), num_units=1, W=nn.init.Orthogonal(),
b=nn.init.Constant(10), nonlinearity=nn_heart.lb_softplus())
l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1, sigma_logscale=False, mu_logscale=False)
l_outs = [l_cdf0, l_cdf1]
l_top = nn.layers.MergeLayer(l_outs)
l_target_mu0 = nn.layers.InputLayer((None, 1))
l_target_mu1 = nn.layers.InputLayer((None, 1))
l_targets = [l_target_mu0, l_target_mu1]
dense_layers = [l_d01, l_d02, l_d11, l_d12, mu0, sigma0, mu0, mu1]
mu_layers = [mu0, mu1]
sigma_layers = [sigma0, sigma1]
return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'dense_layers', 'mu_layers', 'sigma_layers'])(
[l_in], l_outs, l_targets,
l_top, dense_layers, mu_layers, sigma_layers)
def build_objective(model, deterministic=False):
p0 = nn.layers.get_output(model.l_outs[0], deterministic=deterministic)
t0 = nn.layers.get_output(model.l_targets[0])
t0_heaviside = nn_heart.heaviside(t0)
crps0 = T.mean((p0 - t0_heaviside) ** 2)
p1 = nn.layers.get_output(model.l_outs[1], deterministic=deterministic)
t1 = nn.layers.get_output(model.l_targets[1])
t1_heaviside = nn_heart.heaviside(t1)
crps1 = T.mean((p1 - t1_heaviside) ** 2)
return 0.5 * (crps0 + crps1)
def build_updates(train_loss, model, learning_rate):
updates = nn.updates.adam(train_loss, nn.layers.get_all_params(model.l_top), learning_rate)
return updates
def get_mean_validation_loss(batch_predictions, batch_targets):
return [0, 0]
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
nbatches = len(batch_predictions)
npredictions = len(batch_predictions[0])
crpss = []
for i in xrange(npredictions):
p, t = [], []
for j in xrange(nbatches):
p.append(batch_predictions[j][i])
t.append(batch_targets[j][i])
p, t = np.vstack(p), np.vstack(t)
target_cdf = utils_heart.heaviside_function(t)
crpss.append(np.mean((p - target_cdf) ** 2))
return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
| mit | 3,756,646,750,156,495,400 | 39.10917 | 119 | 0.552314 | false |
SafeW3rd/Ciphers | primeSieve.py | 1 | 1139 | # Prime Number Sieve
# http://inventwithpython.com/hacking (BSD Licensed)
import math
def isPrime(num):
# Returns True if num is a prime number, otherwise False.
# Note: Generally, isPrime() is slower than primeSieve().
# all numbers less than 2 are not prime
if num < 2:
return False
# see if num is divisible by any number up to the square root of num
for i in range(2, int(math.sqrt(num)) + 1):
if num % i == 0:
return False
return True
def primeSieve(sieveSize):
# Returns a list of prime numbers calculated using
# the Sieve of Eratosthenes algorithm.
sieve = [True] * sieveSize
sieve[0] = False # zero and one are not prime numbers
sieve[1] = False
# create the sieve
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i * 2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
# compile the list of primes
primes = []
for i in range(sieveSize):
if sieve[i] == True:
primes.append(i)
return primes
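
# --- Illustrative usage (not part of the original file) ---
# Quick self-check of the two functions above: the sieve and the
# trial-division test must agree for every number below the sieve size.
if __name__ == '__main__':
    primes = primeSieve(100)
    assert primes[:10] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert all(isPrime(n) == (n in primes) for n in range(100))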
| mit | -7,295,585,561,268,958,000 | 23.886364 | 72 | 0.587357 | false |
iamaris/CMUAnalysis | Common/generateObjectTree.py | 1 | 11728 | import re
import os
objects = ['Photon', 'Electron', 'Muon', 'Jet', 'Vertex']
susyObjects = {'Photon': 'Photon', 'Electron': 'Electron', 'Muon': 'Muon', 'Jet': 'PFJet', 'Vertex': 'Vertex'}
objectVars = file('ObjectVars.h')
classPat = re.compile('^[ ]*class[ ]+([a-zA-Z0-9]+)Vars[ ]*{')
cTorPat = re.compile('^[ ]*[a-zA-Z0-9]+Vars\([^,]+(,[ ]+Event.*|)\);')
varPat = re.compile('^[ ]*((?:unsigned[ ]|)(?:bool|char|short|int|unsigned|long|float|double))[ ]+([a-zA-Z_][a-zA-Z0-9_]*);')
useEvent = dict()
varList = dict()
obj = ''
for line in objectVars:
if '};' in line:
obj = ''
if obj:
cTorMatch = cTorPat.match(line)
if cTorMatch:
useEvent[obj] = len(cTorMatch.group(1)) != 0
varMatch = varPat.match(line)
if varMatch:
varList[obj].append((varMatch.group(1), varMatch.group(2)))
lineMatch = classPat.match(line)
if lineMatch and lineMatch.group(1) in objects:
obj = lineMatch.group(1)
varList[obj] = []
objectVars.close()
# GENERATE HEADER
headerContent = '''/* Auto-generated header file */
#ifndef ObjectTree_h
#define ObjectTree_h
#include "ObjectVars.h"
#include "TTree.h"
#include "TString.h"
namespace susy {
unsigned const NMAX(512);
'''
for obj in objects:
headerContent += '''
class ''' + obj + '''VarsArray {
public:
''' + obj + '''VarsArray() {}
~''' + obj + '''VarsArray() {}
void setBranches(TTree&);
void setAddress(TTree&);
void push_back(''' + obj + '''Vars const&);
void clear() { size = 0; }
''' + obj + '''Vars at(unsigned) const;
unsigned size;
'''
for (type, name) in varList[obj]:
headerContent += '''
''' + type + ' ' + name + '[NMAX];'
headerContent += '''
};
'''
headerContent += '''
class ObjectTree {
public:
ObjectTree();
~ObjectTree();
void setOutput(TString const&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void setOutput(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
static void setBranchStatus(TTree&,'''
for i in range(len(objects)):
headerContent += ' bool = true'
if i != len(objects) - 1:
headerContent += ','
else:
headerContent += ');'
headerContent += '''
void initEvent(Event const&);
void fill() { output_->Fill(); }'''
for obj in objects:
lowerName = obj.lower()
headerContent += '''
void save(''' + obj + 'Vars const& _vars) { ' + lowerName + 'Array_.push_back(_vars); }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
unsigned get''' + obj + 'Size() const { return ' + lowerName + 'Array_.size; }'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + 'VarsArray const& get' + obj + 'Array() const { return ' + lowerName + 'Array_; }'
headerContent += '''
private:
void setBranches_('''
for i in range(len(objects)):
headerContent += 'bool'
if i != len(objects) - 1:
headerContent += ', '
else:
headerContent += ');'
for obj in objects:
lowerName = obj.lower()
headerContent += '''
''' + obj + '''VarsArray ''' + lowerName + '''Array_;'''
headerContent += '''
unsigned runNumber_;
unsigned lumiNumber_;
unsigned eventNumber_;
TTree* output_;
bool ownOutput_;
};
}
#endif
'''
headerFile = file('ObjectTree.h', 'w')
headerFile.write(headerContent)
headerFile.close()
# GENERATE SRC
cTors = dict()
setBranches = dict()
setAddress = dict()
pushBack = dict()
at = dict()
for obj in objects:
lowerName = obj.lower()
cTorText = '''
''' + obj + 'Vars::' + obj + '''Vars() :'''
initList = ''
for (type, name) in varList[obj]:
initList += '''
''' + name + '('
if type == 'float' or type == 'double':
initList += '0.'
elif type == 'bool':
initList += 'false'
else:
initList += '0'
initList += '),'
initList = initList.rstrip(',')
cTorText += initList
cTorText += '''
{
}
'''
cTors[obj] = cTorText
setBranchText = '''
void
''' + obj + '''VarsArray::setBranches(TTree& _tree)
{
_tree.Branch("''' + lowerName + '.size", &size, "' + lowerName + '.size/i");'
for (type, name) in varList[obj]:
branch = '''
_tree.Branch("''' + lowerName + '.' + name + '", ' + name + ', "' + name + '[' + lowerName + '.size]/'
if type == 'char':
branch += 'B'
elif type == 'unsigned char':
branch += 'b'
elif type == 'short':
branch += 'S'
elif type == 'unsigned short':
branch += 's'
elif type == 'int':
branch += 'I'
elif type == 'unsigned' or type == 'unsigned int':
branch += 'i'
elif type == 'long':
branch += 'L'
elif type == 'unsigned long':
branch += 'l'
elif type == 'float':
branch += 'F'
elif type == 'double':
branch += 'D'
elif type == 'bool':
branch += 'O'
branch += '");'
setBranchText += branch
setBranchText += '''
}
'''
setBranches[obj] = setBranchText
setAddressText = '''
void
''' + obj + '''VarsArray::setAddress(TTree& _tree)
{
std::vector<TString> notFound;
_tree.SetBranchAddress("''' + lowerName + '.size", &size);'
for (type, name) in varList[obj]:
bName = lowerName + '.' + name
setAddressText += '''
if(_tree.GetBranch("''' + bName + '")) _tree.SetBranchAddress("' + bName + '", ' + name + ''');
else notFound.push_back("''' + bName + '");'
setAddressText += '''
for(unsigned iN(0); iN != notFound.size(); ++iN)
std::cerr << "Branch " << notFound[iN] << " not found in input" << std::endl;
}
'''
setAddress[obj] = setAddressText
pushBackText = '''
void
''' + obj + 'VarsArray::push_back(' + obj + '''Vars const& _vars)
{
if(size == NMAX - 1)
throw std::runtime_error("Too many ''' + obj + '''s");
'''
for (type, name) in varList[obj]:
pushBackText += '''
''' + name + '[size] = _vars.' + name + ';'
pushBackText += '''
++size;
}
'''
pushBack[obj] = pushBackText
atText = '''
''' + obj + '''Vars
''' + obj + '''VarsArray::at(unsigned _pos) const
{
if(_pos >= size)
throw std::runtime_error("''' + obj + '''Vars out-of-bounds");
''' + obj + '''Vars vars;
'''
for (type, name) in varList[obj]:
atText += '''
vars.''' + name + ' = ' + name + '[_pos];'
atText += '''
return vars;
}
'''
at[obj] = atText
preamble = '#include "ObjectVars.h"\n'
try:
originalSrc = file('ObjectVars.cc', 'r')
userDef = ''
copy = False
namespace = False
for line in originalSrc:
if 'namespace susy' in line:
namespace = True
if not namespace and 'ObjectVars.h' not in line and not re.match('^[ ]*/\*.*\*/[ ]*$', line):
preamble += line
if '/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = True
if copy:
userDef += line
if '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */' in line:
copy = False
originalSrc.close()
except:
userDef = '\n/* START USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
for obj in objects:
userDef += '''
void
''' + obj + '''Vars::set(''' + susyObjects[obj] + ' const&'
if useEvent[obj]:
userDef += ', Event const&'
userDef += ''')
{
}
/*static*/
''' + obj + '''Vars::setBranchStatus(TTree&)
{
}
'''
userDef += '/* END USER-DEFINED IMPLEMENTATION (DO NOT MODIFY THIS LINE) */\n'
# ObjectTree.cc
objTreeContent = '''/* Auto-generated source file */
#include "ObjectTree.h"
#include "TFile.h"
#include <stdexcept>
#include <iostream>
namespace susy {
'''
for obj in objects:
objTreeContent += setBranches[obj]
objTreeContent += setAddress[obj]
objTreeContent += pushBack[obj]
objTreeContent += at[obj]
objTreeContent += '''
ObjectTree::ObjectTree() :'''
for obj in objects:
lowerName = obj.lower()
objTreeContent += '''
''' + lowerName + '''Array_(),'''
objTreeContent += '''
runNumber_(0),
lumiNumber_(0),
eventNumber_(0),
output_(0),
ownOutput_(false)
{
}
ObjectTree::~ObjectTree()
{
if(ownOutput_ && output_){
TFile* outFile(output_->GetCurrentFile());
outFile->cd();
output_->Write();
delete outFile;
}
}
void
ObjectTree::setOutput(TString const& _fileName'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
ownOutput_ = true;
TFile::Open(_fileName, "recreate");
output_ = new TTree("objectVars", "Object ID variables");
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
void
ObjectTree::setOutput(TTree& _tree'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
output_ = &_tree;
setBranches_('''
for obj in objects:
objTreeContent += '_set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ')
objTreeContent += ''');
}
/*static*/
void
ObjectTree::setBranchStatus(TTree& _input'''
for obj in objects:
objTreeContent += ', bool _set' + obj + '/* = true*/'
objTreeContent += ''')
{
_input.SetBranchStatus("runNumber", 1);
_input.SetBranchStatus("luminosityBlockNumber", 1);
_input.SetBranchStatus("eventNumber", 1);
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj + 'Vars::setBranchStatus(_input);'
objTreeContent += '''
}
#ifdef STANDALONE
void
ObjectTree::initEvent(Event const&)
{
runNumber_ = 0;
lumiNumber_ = 0;
eventNumber_ = 0;
#else
void
ObjectTree::initEvent(Event const& _event)
{
runNumber_ = _event.runNumber;
lumiNumber_ = _event.luminosityBlockNumber;
eventNumber_ = _event.eventNumber;
#endif'''
for obj in objects:
objTreeContent += '''
''' + obj.lower() + 'Array_.clear();'
objTreeContent += '''
}
void
ObjectTree::setBranches_('''
for obj in objects:
objTreeContent += 'bool _set' + obj + ', '
objTreeContent = objTreeContent.rstrip(', ') + ')'
objTreeContent += '''
{
output_->Branch("runNumber", &runNumber_, "runNumber/i");
output_->Branch("lumiNumber", &lumiNumber_, "lumiNumber/i");
output_->Branch("eventNumber", &eventNumber_, "eventNumber/i");
'''
for obj in objects:
objTreeContent += '''
if(_set''' + obj + ') ' + obj.lower() + 'Array_.setBranches(*output_);'
objTreeContent += '''
}
'''
objTreeContent += '}\n'
objTreeFile = file('ObjectTree.cc', 'w')
objTreeFile.write(objTreeContent)
objTreeFile.close()
# ObjectVars.cc
objVarsContent = '''/* Partially auto-generated source file - edit where indicated */
/* Add necessary inclusions below */
''' + preamble + '''
namespace susy {
'''
for obj in objects:
objVarsContent += cTors[obj]
objVarsContent += '\n'
objVarsContent += userDef
objVarsContent += '''
}
'''
objVarsFile = file('ObjectVars.cc', 'w')
objVarsFile.write(objVarsContent)
objVarsFile.close()
| apache-2.0 | 6,525,613,012,333,786,000 | 21.339048 | 125 | 0.548857 | false |
dsimic/taxsims | ss.py | 1 | 1112 | import pandas as pd
import numpy as np
def ss_calc(
contrib_yearly, inv_gwth_rt, num_years, safe_withdrw_rate, start_age=28
):
"""
    inv_gwth_rt is inflation adjusted.
    contrib_yearly is in first year's dollars.
"""
tot_years = max(0, 62 - start_age - num_years) + num_years
df = pd.DataFrame({
'contrib_yearly': [contrib_yearly] * num_years + [0.] *
max(0, (62 - num_years - start_age)),
'inv_value': [0] * tot_years,
}, index=range(tot_years))
for year in range(0, tot_years):
print year
multiplier = np.array([
(1. + inv_gwth_rt) ** (year - y_) for y_ in range(year + 1)])
print multiplier
df['inv_value'][year] = np.sum(
np.array(df['contrib_yearly'][0: year + 1]) * multiplier)
df['monthly_inv_income'] = safe_withdrw_rate * df['inv_value'] / 12.
df['monthly_inv_income_w_spouse'] = df['monthly_inv_income'] * 1.5
return df
if __name__ == "__main__":
df = ss_calc(15e3, .03, 10, .03)
ss_benefit_monthly = 939.00
ss_benefit_w_spouse_monthly = 1.5 * ss_benefit_monthly
| gpl-2.0 | -1,677,530,639,553,851,100 | 31.705882 | 75 | 0.57554 | false |
pacoqueen/bbinn | PyChart-1.39/demos/linestyles.py | 1 | 1258 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
#
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
import pychart.doc_support
import chartdemo
import re
can = canvas.default_canvas()
x = 100
y = 500
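# draw one sample line and label it with its style name, advancing x for the next swatch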
def drawLine(style):
global x, y
name = pychart.doc_support.stringify_value(style)
name = re.sub("line_style\\.", "", name)
name = pychart.doc_support.break_string(name)
can.line(style, x, y, x+40, y)
#print "name=", name
height = font.text_height(name)[0] + 5
tb = text_box.T(text=name, loc=(x, y-height), line_style=None)
x = x + 60
tb.draw()
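# walk every standard line style, wrapping to a new row once the current row is full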
for style in line_style.standards.list():
drawLine(style)
if x >= chartdemo.MaxWidth:
        x = 100
        y = y - 40
| gpl-2.0 | 5,223,164,027,098,408,000 | 26.347826 | 72 | 0.68283 | false |
Psycojoker/wanawana | wanawana/settings.py | 1 | 2687 | """
Django settings for wanawana project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w2=4yi@cyc@vsio@$tvz$%&_po6si@533=cwh5kr2dk#pd69)v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'django_extensions',
'debug_toolbar',
'django_pdb',
'wanawana',
'users',
'events',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
)
ROOT_URLCONF = 'wanawana.urls'
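# the HamlPy loaders come first so *.haml templates are resolved before the standard Django loaders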
TEMPLATE_LOADERS = (
'hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
WSGI_APPLICATION = 'wanawana.wsgi.application'
# Email configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from settings_local import *
except ImportError:
pass
| gpl-3.0 | 4,815,851,771,319,772,000 | 23.87963 | 71 | 0.723483 | false |
TamiaLab/carnetdumaker | apps/bugtracker/tests/test_context_processors.py | 1 | 2920 | """
Tests suite for the context processors of the bug tracker app.
"""
from django.test import SimpleTestCase
from django.http import HttpRequest
from ..context_processors import bugtracker
from ..constants import (STATUS_OPEN,
STATUS_NEED_DETAILS,
STATUS_CONFIRMED,
STATUS_WORKING_ON,
STATUS_DEFERRED,
STATUS_DUPLICATE,
STATUS_WONT_FIX,
STATUS_CLOSED,
STATUS_FIXED)
from ..constants import (PRIORITY_GODZILLA,
PRIORITY_CRITICAL,
PRIORITY_MAJOR,
PRIORITY_MINOR,
PRIORITY_TRIVIAL,
PRIORITY_NEED_REVIEW,
PRIORITY_FEATURE,
PRIORITY_WISHLIST,
PRIORITY_INVALID,
PRIORITY_NOT_MY_FAULT)
from ..constants import (DIFFICULTY_DESIGN_ERRORS,
DIFFICULTY_IMPORTANT,
DIFFICULTY_NORMAL,
DIFFICULTY_LOW_IMPACT,
DIFFICULTY_OPTIONAL)
class BugTrackerContextProcessorTestCase(SimpleTestCase):
"""
Tests case for the context processor.
"""
def test_bugtracker_context_update(self):
"""
Test if the ``bugtracker`` context processor add the constants into the context.
"""
request = HttpRequest()
result = bugtracker(request)
self.assertEqual(result, {
'BUGTRACKER_STATUS': {
'OPEN': STATUS_OPEN,
'NEED_DETAILS': STATUS_NEED_DETAILS,
'CONFIRMED': STATUS_CONFIRMED,
'WORKING_ON': STATUS_WORKING_ON,
'DEFERRED': STATUS_DEFERRED,
'DUPLICATE': STATUS_DUPLICATE,
'WONT_FIX': STATUS_WONT_FIX,
'CLOSED': STATUS_CLOSED,
'FIXED': STATUS_FIXED,
},
'BUGTRACKER_PRIORITY': {
'GODZILLA': PRIORITY_GODZILLA,
'CRITICAL': PRIORITY_CRITICAL,
'MAJOR': PRIORITY_MAJOR,
'MINOR': PRIORITY_MINOR,
'TRIVIAL': PRIORITY_TRIVIAL,
'NEED_REVIEW': PRIORITY_NEED_REVIEW,
'FEATURE': PRIORITY_FEATURE,
'WISHLIST': PRIORITY_WISHLIST,
'INVALID': PRIORITY_INVALID,
'NOT_MY_FAULT': PRIORITY_NOT_MY_FAULT,
},
'BUGTRACKER_DIFFICULTY': {
'DESIGN_ERRORS': DIFFICULTY_DESIGN_ERRORS,
'IMPORTANT': DIFFICULTY_IMPORTANT,
'NORMAL': DIFFICULTY_NORMAL,
'LOW_IMPACT': DIFFICULTY_LOW_IMPACT,
'OPTIONAL': DIFFICULTY_OPTIONAL,
},
})
| agpl-3.0 | -677,834,467,567,544,200 | 35.962025 | 88 | 0.492123 | false |
codedsk/hubcheck-hubzero-tests | hchztests/tests/test_website_support_need_help.py | 1 | 7124 | import pytest
import sys
import os
import re
import hubcheck
pytestmark = [ pytest.mark.website,
pytest.mark.tickets,
pytest.mark.need_help,
pytest.mark.reboot,
pytest.mark.upgrade,
pytest.mark.prod_safe_upgrade
]
class TestNeedHelp(hubcheck.testcase.TestCase2):
def setup_method(self,method):
# setup a web browser
self.browser.get(self.https_authority)
# get user account info
self.username,self.password = \
self.testdata.find_account_for('ticketsubmitter')
self.adminuser,self.adminpass = \
self.testdata.find_account_for('ticketmanager')
self.ticket_number = None
def teardown_method(self,method):
# if we created a ticket, delete the ticket
if self.ticket_number is not None \
and (self.adminuser != "") \
and (self.adminpass != ""):
try:
self.utils.account.logout()
except:
pass
self.utils.account.login_as(self.adminuser,self.adminpass)
self.utils.support.close_support_ticket_invalid(self.ticket_number)
def test_link_exists(self):
"""
click the need help link, to see if the widget exists
"""
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
po.close()
@pytest.mark.nt
def test_link_changes_webpage(self):
"""
click the need help link, check if the url changes
"""
po = self.catalog.load_pageobject('GenericPage')
start_url = po.current_url()
po.toggle_needhelp()
end_url = po.current_url()
assert start_url == end_url, "clicking the 'Need Help?' link" \
+ " changed the web page from %s to %s" % (start_url,end_url)
def test_if_link_leads_to_support_url(self):
"""
open the "Need Help?" dialogue to ensure it does not lead to
/support
        This is sometimes seen when javascript is turned off; with
        javascript on, clicking this link should not send the user to
        the /support webpage.
"""
# store the start and end page url's for comparison
# click the needhelp link and see if it takes us to /support
po = self.catalog.load_pageobject('SupportNeedHelpPage')
startpageurl = po.current_url()
po.open()
endpageurl = po.current_url()
assert startpageurl == endpageurl, \
"User was redirected to %s\n" % endpageurl
# FIXME: use urlparse here
# create a pattern for a url regular expression
p = re.compile('(([^:]+)://)?([^:/]+)(:([0-9]+))?(/.*)?')
(junk, junk, junk, junk, junk, path) = p.search(endpageurl).groups()
# check that the page we were taken to is not /support
s = "pageurl = %s\npath = %s\n" % (endpageurl,path)
assert path != '/support', s
def test_submit_ticket_logged_in_using_need_help_link(self):
"""
login to the website as the "ticket submitter" and submit a
ticket using the need help link.
"""
problem = 'hubcheck test ticket\n%s' % (self.fnbase)
# login to the website and click the need help link
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username, name, and email fields are
# not accessible while logged in
self.ticket_number = po.submit_ticket({'problem':problem})
# check if the ticket number is a valid number
assert self.ticket_number is not None, "no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.captcha
def test_submit_ticket_logged_out_using_need_help_link(self):
"""
submit a support ticket using the need help link while not
logged into the website.
"""
# data for trouble report
data = {
'name' : 'hubcheck testuser',
'email' : 'hubchecktest@hubzero.org',
'problem' : 'hubcheck test ticket\n%s' % (self.fnbase),
'captcha' : True,
}
# navigate to the SupportNeedHelp Page:
po = self.catalog.load_pageobject('SupportNeedHelpPage')
po.open()
# fill in the trouble report
# username is optional
self.ticket_number = po.submit_ticket(data)
# check if the ticket number is a valid number
assert self.ticket_number is not None, \
"no ticket number returned"
assert re.match('\d+',self.ticket_number) is not None, \
"cound not find a matching ticket number in '%s'" \
% (self.ticket_number)
# convert to a number and ensure it is not ticket #0
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
@pytest.mark.tickets_attach_jpg
def test_attaching_jpg_image_to_ticket_submitted_through_need_help(self):
"""
Login to the website and submit a ticket, using the need help
link, with an attached jpeg image.
"""
problem = 'hubcheck test ticket\nattaching jpg image\n%s' \
% (self.fnbase)
uploadfilename = 'app2.jpg'
uploadfilepath = os.path.join(self.datadir,'images',uploadfilename)
data = {
'problem' : problem,
'upload' : uploadfilepath,
}
# login to the website and navigate to the need help form
self.utils.account.login_as(self.username,self.password)
po = self.catalog.load_pageobject('SupportNeedHelpPage')
# po.open()
po.needhelplink.click()
# submit a trouble report
# username, name, and email fields are not accessible
self.ticket_number = po.submit_ticket(data)
assert self.ticket_number is not None, "no ticket number returned"
assert int(self.ticket_number) > 0, \
"invalid ticket number returned: %s" % (self.ticket_number)
po.goto_ticket()
po = self.catalog.load_pageobject('SupportTicketViewPage')
content = po.get_ticket_content()
imgsrc = content.download_image(uploadfilename)
# not sure how to really download image files yet.
# so we assume that as long as opening the image didn't
# cause an error, the test passed.
assert re.search(uploadfilename,imgsrc) is not None, \
"After uploading an image to support ticket" \
+ " #%s, could not download image %s" \
% (self.ticket_number,uploadfilename)
| mit | -6,689,282,357,007,949,000 | 32.28972 | 79 | 0.592504 | false |
jasper-meyer/Platformer | platformer.py | 1 | 3751 | """
platformer.py
Author: Jasper Meyer
Credit: You, the internet, Brendan
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Platformer
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
SCREEN_WIDTH = 1080
SCREEN_HEIGHT = 720
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
black = Color(0, 1)
backcol = Color(0xd9ffcc, 1.0)
purp = Color(0x9900cc, 1.0)
blue = Color(0x3399ff,1.0)
noline = LineStyle(0, black)
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, backcol)
bg = Sprite(bg_asset, (0,0))
thinline = LineStyle(1, black)
sq = RectangleAsset (75,75, noline, black)
wub=0
pup=0
mousex=0
mousey=0
mousexround=0
mouseyround=0
play = RectangleAsset (25,50, noline, purp)
spr = RectangleAsset (20,10, noline, blue)
vy=0
player=0
acc = 0
ti = 0
rupx=0
lupx=0
vx=0
up=0
upup=0
stop = 0
shutup=0
spring = 0
sub = 0
springlist = []
def wup(event):
global wub
global mousexround
global mouseyround
wub = 1
if wub == 1:
mousexround=mousex-((mousex)%75)
mouseyround=mousey-((mousey)%75)
block = Sprite (sq, (mousexround, mouseyround))
def mousemo(event):
global mousex
global mousey
mousex=event.x
mousey=event.y
def spri(event):
global spring
global mousex
global mousey
global mouseyround
global sub
global springlist
sub =1
if sub == 1:
mouseyround=mousey-((mousey)%75)+65
springlist.append (Sprite (spr, (mousex, mouseyround)))
def pup(event):
global pub
global mousex
global mouseyround
global player
pub = 1
if pub == 1:
mouseyround=mousey-((mousey)%75)+25
if player == 0:
player = Sprite (play, (mousex, mouseyround))
def rup(event):
global rupx
rupx=1
def lup(event):
global lupx
lupx=1
def uup(event):
global up
up=1
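# per-frame update: applies gravity, horizontal motion, spring bounces and block collisions to the player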
def step():
if player != 0:
global vy
global acc
global ti
global rupx
global vx
global lupx
global up
global upup
global stop
global shutup
global springlist
global player
acc = 0.02
for s in springlist:
if player.collidingWith(s):
vy=-50+vy
vx=-vx
if stop == 0:
ti=ti+.5
if upup==4.5:
vy = (0.2*ti)-upup
else:
vy = (0.2*ti)
player.y=player.y+vy
player.x=player.x+vx
if rupx == 1:
vx=vx+1.5
lupx=0
rupx=0
if lupx == 1:
vx=vx-1.5
rupx=0
lupx=0
if vx > 3:
vx = 3
if vx < -3:
vx =-3
if up == 1:
upup = 4.5
up=0
if up == 0:
upup =4.5
col = player.collidingWithSprites(Sprite)
if len(col) > 1 and col[1].y<player.y+500:
stop=1
player.y=player.y-0.2
else:
stop=0
if stop == 1:
vy=0
ti=0
if len(col) > 1:
if col[1].y<player.y+50:
vx=-0.5*vx
if player.y > 2000:
player = 0
ti=0
myapp.listenKeyEvent('keyup', 's', spri)
myapp.listenKeyEvent('keydown', 'up arrow', uup)
myapp.listenKeyEvent('keydown', 'left arrow', lup)
myapp.listenKeyEvent('keydown', 'right arrow', rup)
myapp.listenKeyEvent('keyup', 'p', pup)
myapp.listenKeyEvent('keyup', 'w', wup)
myapp.listenMouseEvent('mousemove', mousemo)
myapp.run(step) | mit | 2,481,779,371,456,941,600 | 17.76 | 82 | 0.546254 | false |
Ophiuchus1312/enigma2-master | lib/python/Screens/TimerEdit.py | 1 | 20176 | from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.Label import Label
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from Components.Sources.StaticText import StaticText
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from Screens.TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from Tools.FuzzyDate import FuzzyTime
from Tools.Directories import resolveFilename, SCOPE_HDD
from time import time, localtime
from timer import TimerEntry as RealTimerEntry
from enigma import eServiceCenter
import Tools.CopyFiles
import os
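# Timer overview screen: lists scheduled and processed record timers and maps the colour keys to add/delete/enable/disable/cleanup actions.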
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Timer List"))
self.onChangedEntry = [ ]
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self["description"] = Label()
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer overview"))
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def createSummary(self):
return TimerEditListSummary
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
# print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, simulTimerList)
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
self["description"].setText(cur.description)
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
if len(self.list) == 0:
return
timer = self['timerlist'].getCurrent()
if timer:
try:
name = str(timer.name)
time = ("%s %s ... %s") % (FuzzyTime(timer.begin)[0], FuzzyTime(timer.begin)[1], FuzzyTime(timer.end)[1])
duration = ("(%d " + _("mins") + ")") % ((timer.end - timer.begin) / 60)
service = str(timer.service_ref.getServiceName())
if timer.state == RealTimerEntry.StateWaiting:
state = _("waiting")
elif timer.state == RealTimerEntry.StatePrepared:
state = _("about to start")
elif timer.state == RealTimerEntry.StateRunning:
if timer.justplay:
state = _("zapped")
else:
state = _("recording...")
elif timer.state == RealTimerEntry.StateEnded:
state = _("done!")
else:
state = _("<unknown>")
except:
name = ""
time = ""
duration = ""
service = ""
else:
name = ""
time = ""
duration = ""
service = ""
for cb in self.onChangedEntry:
cb(name, time, duration, service, state)
def fillTimerList(self):
#helper function to move finished timers to end of list
def eol_compare(x, y):
if x[0].state != y[0].state and x[0].state == RealTimerEntry.StateEnded or y[0].state == RealTimerEntry.StateEnded:
return cmp(x[0].state, y[0].state)
return cmp(x[0].begin, y[0].begin)
list = self.list
print list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
if config.usage.timerlist_finished_timer_position.index: #end of list
list.sort(cmp = eol_compare)
else:
list.sort(key = lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
service = str(cur.service_ref.getServiceName())
t = localtime(cur.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + cur.name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
if not cur:
return
onhdd = False
self.moviename = f
path = resolveFilename(SCOPE_HDD)
files = os.listdir(path)
for file in files:
if file.startswith(f):
onhdd = True
break
if onhdd:
message = (_("Do you really want to delete %s?") % (cur.name))
choices = [(_("No"), "no"),
(_("Yes, delete from Timerlist"), "yes"),
(_("Yes, delete from Timerlist and delete recording"), "yesremove")]
self.session.openWithCallback(self.startDelete, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name), default = False)
def startDelete(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == 'no':
return
elif answer[1] == 'yes':
self.removeTimer(True)
elif answer[1] == 'yesremove':
if config.EMC.movie_trashcan_enable.getValue():
trashpath = config.EMC.movie_trashcan_path.getValue()
self.MoveToTrash(trashpath)
elif config.usage.movielist_trashcan.getValue():
trashpath = resolveFilename(SCOPE_HDD) + '.Trash'
self.MoveToTrash(trashpath)
else:
self.session.openWithCallback(self.callbackRemoveRecording, MessageBox, _("Do you really want to delete the recording?"), default = False)
def callbackRemoveRecording(self, answer):
if not answer:
return
self.delete()
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def MoveToTrash(self, trashpath):
self.removeTimer(True)
moviepath = os.path.normpath(resolveFilename(SCOPE_HDD))
movedList =[]
files = os.listdir(moviepath)
for file in files:
if file.startswith(self.moviename):
movedList.append((os.path.join(moviepath, file), os.path.join(trashpath, file)))
Tools.CopyFiles.moveFiles(movedList, None)
def delete(self):
item = self["timerlist"].getCurrent()
if item is None:
return # huh?
name = item.name
service = str(item.service_ref.getServiceName())
t = localtime(item.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
path = resolveFilename(SCOPE_HDD)
self.removeTimer(True)
from enigma import eBackgroundFileEraser
files = os.listdir(path)
for file in files:
if file.startswith(f):
eBackgroundFileEraser.getInstance().erase(os.path.realpath(path + file))
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
# print "finished edit"
if answer[0]:
# print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
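# Shown when a new or edited timer conflicts with existing entries; lets the user edit or toggle the conflicting timers.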
class TimerSanityConflict(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
EDIT = 3
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
print "TimerSanityConflict"
self["timer1"] = TimerList(self.getTimerList(timer[0]))
self.list = []
self.list2 = []
count = 0
for x in timer:
if count != 0:
self.list.append((_("Conflicting timer") + " " + str(count), x))
self.list2.append((timer[count], False))
count += 1
if count == 1:
self.list.append((_("Channel not in services list")))
self["list"] = MenuList(self.list)
self["timer2"] = TimerList(self.list2)
self["key_red"] = Button("Edit")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self.key_green_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.leave_ok,
"cancel": self.leave_cancel,
"red": self.editTimer1,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer sanity error"))
self.onShown.append(self.updateState)
def getTimerList(self, timer):
return [(timer, False)]
def editTimer1(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())
def editTimer2(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())
def toggleNewTimer(self):
if self.timer[0].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def toggleTimer(self):
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x].disabled:
self.timer[x].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[x].isRunning():
self.timer[x].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if self.timer[x].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def finishedEdit(self, answer):
self.leave_ok()
def leave_ok(self):
self.close((True, self.timer[0]))
def leave_cancel(self):
self.close((False, self.timer[0]))
def up(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def down(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
if self.timer[0] is not None:
if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
self["actions"].actions.update({"green":self.toggleTimer})
self["key_green"].setText(_("Enable"))
self.key_green_choice = self.ENABLE
elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
self.removeAction("green")
self["key_green"].setText(" ")
self.key_green_choice = self.EMPTY
elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
self["actions"].actions.update({"green":self.toggleNewTimer})
self["key_green"].setText(_("Disable"))
self.key_green_choice = self.DISABLE
if len(self.timer) > 1:
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x] is not None:
if self.key_yellow_choice == self.EMPTY:
self["actions"].actions.update({"yellow":self.editTimer2})
self["key_yellow"].setText(_("Edit"))
self.key_yellow_choice = self.EDIT
if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Enable"))
self.key_blue_choice = self.ENABLE
elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Disable"))
self.key_blue_choice = self.DISABLE
else:
				#FIXME.... this doesn't hide the buttons themselves.... just the text
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
if self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
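# Companion summary screen that mirrors the selected timer's name, service, time, duration and state.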
class TimerEditListSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["name"] = StaticText("")
self["service"] = StaticText("")
self["time"] = StaticText("")
self["duration"] = StaticText("")
self["state"] = StaticText("")
self.onShow.append(self.addWatcher)
self.onHide.append(self.removeWatcher)
def addWatcher(self):
self.parent.onChangedEntry.append(self.selectionChanged)
self.parent.updateState()
def removeWatcher(self):
self.parent.onChangedEntry.remove(self.selectionChanged)
def selectionChanged(self, name, time, duration, service, state):
self["name"].text = name
self["service"].text = service
self["time"].text = time
self["duration"].text = duration
self["state"].text = state
| gpl-2.0 | -1,313,426,634,689,652,200 | 31.753247 | 179 | 0.6875 | false |
polaris-gslb/polaris-core | tests/test-polaris-pdns.py | 2 | 1937 | #!/usr/bin/env python3
import subprocess
import sys
import time
import json
POLARIS_PDNS_FILE = '/opt/polaris/bin/polaris-pdns'
def pretty_json(s):
d = json.loads(s)
return json.dumps(d, indent=4, separators=(',', ': '))
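# Drives the polaris-pdns backend over stdin/stdout: one JSON request per line in, one JSON response per line out (PowerDNS remote-backend style).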
class TestPolarisPDNS:
def __init__(self, polaris_pdns_file):
self.proc = subprocess.Popen([ polaris_pdns_file ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def execute_query(self, query):
query += '\n'
self.proc.stdin.write(query.encode())
self.proc.stdin.flush()
output = self.proc.stdout.readline().decode()
return pretty_json(output)
def prepare_query(self, method, params):
q = {
'method': method,
'parameters': {
'qtype': params['qtype'],
'qname': params['qname'],
'remote': params['remote'],
'local': params['local'],
'real-remote': params['real-remote'],
'zone-id': params['zone-id']
}
}
return json.dumps(q)
if __name__ == '__main__':
t = TestPolarisPDNS(POLARIS_PDNS_FILE)
method = 'lookup'
params = {
'qtype': 'A',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
method = 'lookup'
params = {
'qtype': 'SOA',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
| bsd-3-clause | 3,387,221,317,398,084,600 | 24.486842 | 62 | 0.497161 | false |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_good_192matrix.py | 1 | 6357 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
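# Builds a 192-value feature vector (8 overlapping windows x 24 normalised spectral bands) for each of the five occurrences of the word "je" in the recording.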
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part computes the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other columns hold the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other columns hold the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other columns hold the fft of the remaining windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the fourth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other columns hold the fft of the remaining windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part computes the fifth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other columns hold the fft of the remaining windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit | 1,603,875,107,597,510,700 | 38.484472 | 147 | 0.46028 | false |
wholland/env | env.py | 1 | 6119 | #!/usr/bin/python
import argparse
import json
import shutil
import os
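# Copy one file into place; when a backup path is given, back up the existing destination first and skip the copy if the backup fails.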
def copy_file(src, dest, backup):
success = True
if not backup is None:
(backup_folder, backup_file) = os.path.split(backup)
print("Creating backup file for " + dest + " at " + backup)
try:
if not os.path.exists(backup_folder):
os.makedirs(backup_folder)
shutil.copyfile(dest, backup)
except Exception as e:
print("Backup failed: " + str(e))
success = False
if success:
(dest_folder, dest_file) = os.path.split(dest)
print("Copy file " + src + " to " + dest)
try:
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
shutil.copyfile(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def copy_dir(src, dest, backup):
success = True
if not backup is None:
try:
print("Creating backup file for " + dest + " at " + backup)
            shutil.rmtree(backup, ignore_errors=True)
shutil.copytree(dest, backup)
except IOError as e:
print("Backup failed: " + str(e))
success = False
if success:
try:
print("Copy directory " + src + " to " + dest)
shutil.copytree(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def push(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pushing " + definition["name"]);
src = os.path.expanduser(os.path.join(args.source, definition["source"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
backup = os.path.expanduser(os.path.join(args.backup, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if args.unsafe:
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_file(src, dest, backup)
else:
print("Would copy file. Src:" + src + " Dest:" + dest + " Backup:" + backup);
elif definition["type"].lower() == "d":
# Copy a directory
if args.verbose:
print(definition["name"] + ": Pushing directory from " + src + " to " + dest)
if args.unsafe:
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_dir(src, dest, backup)
else:
print("Would copy dir. Src:" + src + " Dest:" + dest + " Backup:" + backup);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def pull(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pulling " + definition["name"]);
src = os.path.expanduser(os.path.join(args.target, definition["target"]))
dest = os.path.expanduser(os.path.join(args.source, definition["source"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def revert(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
src = os.path.expanduser(os.path.join(args.backup, definition["target"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def main():
default_defs = "~/env/env.def"
default_source = "~/env/"
default_target = "~/"
default_backup = "~/.backup/env/"
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", help="Increase Verbosity")
parser.add_argument("-f", "--file", default=default_defs, help="Definition File to use")
parser.add_argument("-s", "--source", default=default_source, help="Override source root")
parser.add_argument("-t", "--target", default=default_target, help="Override target root")
parser.add_argument("-w", "--wimp", action="store_true", help="Don't actually make any changes (implies -v)")
subparsers = parser.add_subparsers()
parser_push = subparsers.add_parser("push", help="Push configs into environment")
parser_push.add_argument("-u", "--unsafe", action="store_true", help="No backups Created")
parser_push.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_push.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_push.add_argument("categories", nargs=argparse.REMAINDER)
parser_push.set_defaults(func=push)
parser_pull = subparsers.add_parser("pull", help="Pull configs from environment")
parser_pull.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_pull.add_argument("categories", nargs=argparse.REMAINDER)
parser_pull.set_defaults(func=pull)
parser_revert = subparsers.add_parser("revert", help="Revert configs from backups")
parser_revert.add_argument("-c", "--cleanup", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_revert.add_argument("categories", nargs=argparse.REMAINDER)
parser_revert.set_defaults(func=revert)
args = parser.parse_args()
if args.wimp:
args.verbose = True
args.func(args)
if __name__ == "__main__":
main();
| mit | 4,431,598,936,096,710,000 | 34.575581 | 110 | 0.648962 | false |
jailuthra/misc | python/quicksort.py | 1 | 1066 | import sys
import random
comparisons = 0
def main():
global comparisons
with open(sys.argv[1], 'r') as f:
arr = [int(x) for x in f.read().split()]
quicksort(arr, 0, len(arr)-1)
# print(arr)
print(comparisons)
def getPivot(arr, l, r):
first = arr[l]
mid = arr[(l+r)//2]
last = arr[r]
if first <= mid <= last or last <= mid <= first:
return (l+r)//2
elif mid <= first <= last or last <= first <= mid:
return l
else:
return r
def partition(arr, l, r):
k = getPivot(arr, l, r)
k = random.randint(l, r)
pivot = arr[k]
arr[k], arr[l] = arr[l], arr[k]
i = l+1
for j in range(l+1, r+1):
if arr[j] < pivot:
arr[j], arr[i] = arr[i], arr[j]
i += 1
arr[l], arr[i-1] = arr[i-1], arr[l]
return i-1
def quicksort(arr, l, r):
if r - l < 0:
return
global comparisons
comparisons += r - l
p = partition(arr, l, r)
quicksort(arr, l, p-1)
quicksort(arr, p+1, r)
if __name__ == '__main__':
main()
| mit | 7,158,383,021,174,650,000 | 21.208333 | 54 | 0.5 | false |
nens/threedi-qgis-plugin | tests/test_geo_utils.py | 1 | 1446 | """
Test geo utils.
"""
from qgis.core import QgsCoordinateTransform
from ThreeDiToolbox.tests.utilities import ensure_qgis_app_is_initialized
from ThreeDiToolbox.utils.geo_utils import get_coord_transformation_instance
import pytest
@pytest.fixture
def rdnew_to_wgs84():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 28992, 4326
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
@pytest.fixture
def wgs84_to_rdnew():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 4326, 28992
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
def test_get_coord_transformation_instance(rdnew_to_wgs84, wgs84_to_rdnew):
assert isinstance(rdnew_to_wgs84, QgsCoordinateTransform)
assert isinstance(wgs84_to_rdnew, QgsCoordinateTransform)
def test_get_coord_transformation_epsg(rdnew_to_wgs84):
assert rdnew_to_wgs84.sourceCrs().isValid()
assert rdnew_to_wgs84.sourceCrs().authid() == "EPSG:28992"
assert rdnew_to_wgs84.destinationCrs().isValid()
assert rdnew_to_wgs84.destinationCrs().authid() == "EPSG:4326"
def test_get_coord_transformation_epsg_reverse(wgs84_to_rdnew):
assert wgs84_to_rdnew.sourceCrs().isValid()
assert wgs84_to_rdnew.sourceCrs().authid() == "EPSG:4326"
assert wgs84_to_rdnew.destinationCrs().isValid()
assert wgs84_to_rdnew.destinationCrs().authid() == "EPSG:28992"
| gpl-3.0 | -167,221,051,593,308,580 | 31.863636 | 76 | 0.744813 | false |
mistercrunch/panoramix | superset/views/base_api.py | 2 | 21953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, Model, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, event_logger, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.schemas import error_payload_content
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"include_ids": {"type": "array", "items": {"type": "integer"}},
"filter": {"type": "string"},
},
}
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
try:
duration, response = time_function(f, self, *args, **kwargs)
except Exception as ex:
self.incr_stats("error", f.__name__)
raise ex
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
Base Custom filter for the GET list that filters all dashboards, slices
that a user has favored or not
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(
FavStar.user_id == g.user.get_id(),
FavStar.class_name == self.class_name,
)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
class BaseSupersetModelRestApi(ModelRestApi):
"""
Extends FAB's ModelResApi to implement specific superset generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"data_from_cache": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"import_": "add",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
related_fields = {
"<RELATED_FIELD>": <RelatedFieldFilter>)
}
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
filter_rel_fields_field = {
"<RELATED_FIELD>": "<FILTER>")
}
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
"""
Declare a set of allowed related fields that the `related` endpoint supports
""" # pylint: disable=pointless-string-statement
text_field_rel_fields: Dict[str, str] = {}
"""
Declare an alternative for the human readable representation of the Model object::
text_field_rel_fields = {
"<RELATED_FIELD>": "<RELATED_OBJECT_FIELD>"
}
""" # pylint: disable=pointless-string-statement
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
responses = {
"400": {"description": "Bad request", "content": error_payload_content},
"401": {"description": "Unauthorized", "content": error_payload_content},
"403": {"description": "Forbidden", "content": error_payload_content},
"404": {"description": "Not found", "content": error_payload_content},
"422": {
"description": "Could not process entity",
"content": error_payload_content,
},
"500": {"description": "Fatal error", "content": error_payload_content},
}
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
self.openapi_spec_component_schemas = ()
self.openapi_spec_component_schemas = self.openapi_spec_component_schemas + (
RelatedResponseSchema,
DistincResponseSchema,
)
super().__init__()
def add_apispec_components(self, api_spec: APISpec) -> None:
"""
        Adds extra OpenAPI schema spec components that are declared
        on the `openapi_spec_component_schemas` class property
"""
for schema in self.openapi_spec_component_schemas:
try:
api_spec.components.schema(
schema.__name__, schema=schema,
)
except DuplicateComponentNameError:
pass
super().add_apispec_components(api_spec)
def create_blueprint(
self, appbuilder: AppBuilder, *args: Any, **kwargs: Any
) -> Blueprint:
self.stats_logger = self.appbuilder.get_app.config["STATS_LOGGER"]
return super().create_blueprint(appbuilder, *args, **kwargs)
def _init_properties(self) -> None:
model_id = self.datamodel.get_pk_name()
if self.list_columns is None and not self.list_model_schema:
self.list_columns = [model_id]
if self.show_columns is None and not self.show_model_schema:
self.show_columns = [model_id]
if self.edit_columns is None and not self.edit_model_schema:
self.edit_columns = [model_id]
if self.add_columns is None and not self.add_model_schema:
self.add_columns = [model_id]
super()._init_properties()
def _get_related_filter(
self, datamodel: SQLAInterface, column_name: str, value: str
) -> Filters:
filter_field = self.related_field_filters.get(column_name)
if isinstance(filter_field, str):
filter_field = RelatedFieldFilter(cast(str, filter_field), FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = datamodel.get_filters(search_columns)
base_filters = self.filter_rel_fields.get(column_name)
if base_filters:
filters.add_filter_list(base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_distinct_filter(self, column_name: str, value: str) -> Filters:
filter_field = RelatedFieldFilter(column_name, FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = self.datamodel.get_filters(search_columns)
filters.add_filter_list(self.base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_text_for_model(self, model: Model, column_name: str) -> str:
if column_name in self.text_field_rel_fields:
model_column_name = self.text_field_rel_fields.get(column_name)
if model_column_name:
return getattr(model, model_column_name)
return str(model)
def _get_result_from_rows(
self, datamodel: SQLAInterface, rows: List[Model], column_name: str
) -> List[Dict[str, Any]]:
return [
{
"value": datamodel.get_pk_value(row),
"text": self._get_text_for_model(row, column_name),
}
for row in rows
]
def _add_extra_ids_to_result(
self,
datamodel: SQLAInterface,
column_name: str,
ids: List[int],
result: List[Dict[str, Any]],
) -> None:
if ids:
# Filter out already present values on the result
values = [row["value"] for row in result]
ids = [id_ for id_ in ids if id_ not in values]
pk_col = datamodel.get_pk()
# Fetch requested values from ids
extra_rows = db.session.query(datamodel.obj).filter(pk_col.in_(ids)).all()
result += self._get_result_from_rows(datamodel, extra_rows, column_name)
def incr_stats(self, action: str, func_name: str) -> None:
"""
        Proxy function for statsd.incr to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
"""
self.stats_logger.incr(f"{self.__class__.__name__}.{func_name}.{action}")
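        # For example (hypothetical subclass name): on a ChartRestApi subclass,
        # incr_stats("success", "get_list") emits the statsd key
        # "ChartRestApi.get_list.success".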
def timing_stats(self, action: str, func_name: str, value: float) -> None:
"""
        Proxy function for statsd.timing to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
:param value: A float with the time it took for the endpoint to execute
"""
self.stats_logger.timing(
f"{self.__class__.__name__}.{func_name}.{action}", value
)
def send_stats_metrics(
self, response: Response, key: str, time_delta: Optional[float] = None
) -> None:
"""
Helper function to handle sending statsd metrics
:param response: flask response object, will evaluate if it was an error
:param key: The function name
:param time_delta: Optional time it took for the endpoint to execute
"""
if 200 <= response.status_code < 400:
self.incr_stats("success", key)
else:
self.incr_stats("error", key)
if time_delta:
self.timing_stats("time", key, time_delta)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.info",
object_ref=False,
log_to_statsd=False,
)
def info_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB _info endpoint
"""
duration, response = time_function(super().info_headless, **kwargs)
self.send_stats_metrics(response, self.info.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get",
object_ref=False,
log_to_statsd=False,
)
def get_headless(self, pk: int, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET endpoint
"""
duration, response = time_function(super().get_headless, pk, **kwargs)
self.send_stats_metrics(response, self.get.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_list",
object_ref=False,
log_to_statsd=False,
)
def get_list_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET list endpoint
"""
duration, response = time_function(super().get_list_headless, **kwargs)
self.send_stats_metrics(response, self.get_list.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
object_ref=False,
log_to_statsd=False,
)
def post_headless(self) -> Response:
"""
Add statsd metrics to builtin FAB POST endpoint
"""
duration, response = time_function(super().post_headless)
self.send_stats_metrics(response, self.post.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
object_ref=False,
log_to_statsd=False,
)
def put_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB PUT endpoint
"""
duration, response = time_function(super().put_headless, pk)
self.send_stats_metrics(response, self.put.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
object_ref=False,
log_to_statsd=False,
)
def delete_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB DELETE endpoint
"""
duration, response = time_function(super().delete_headless, pk)
self.send_stats_metrics(response, self.delete.__name__, duration)
return response
@expose("/related/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def related(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get related fields data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Related column data
content:
application/json:
schema:
                    $ref: "#/components/schemas/RelatedResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_rel_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._handle_page_args(args)
try:
datamodel = self.datamodel.get_related_interface(column_name)
except KeyError:
return self.response_404()
page, page_size = self._sanitize_page_args(page, page_size)
# handle ordering
order_field = self.order_rel_fields.get(column_name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
# handle filters
filters = self._get_related_filter(datamodel, column_name, args.get("filter"))
# Make the query
_, rows = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
# produce response
result = self._get_result_from_rows(datamodel, rows, column_name)
# If ids are specified make sure we fetch and include them on the response
ids = args.get("include_ids")
self._add_extra_ids_to_result(datamodel, column_name, ids, result)
return self.response(200, count=len(result), result=result)
@expose("/distinct/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def distinct(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get distinct values from field data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Distinct field data
content:
application/json:
schema:
                    $ref: "#/components/schemas/DistincResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_distinct_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._sanitize_page_args(*self._handle_page_args(args))
# Create generic base filters with added request filter
filters = self._get_distinct_filter(column_name, args.get("filter"))
# Make the query
query_count = self.appbuilder.get_session.query(
func.count(distinct(getattr(self.datamodel.obj, column_name)))
)
count = self.datamodel.apply_filters(query_count, filters).scalar()
if count == 0:
return self.response(200, count=count, result=[])
query = self.appbuilder.get_session.query(
distinct(getattr(self.datamodel.obj, column_name))
)
# Apply generic base filters with added request filter
query = self.datamodel.apply_filters(query, filters)
# Apply sort
query = self.datamodel.apply_order_by(query, column_name, "asc")
# Apply pagination
result = self.datamodel.apply_pagination(query, page, page_size).all()
# produce response
result = [
{"text": item[0], "value": item[0]}
for item in result
if item[0] is not None
]
return self.response(200, count=count, result=result)
| apache-2.0 | 3,086,351,560,622,024,000 | 36.398637 | 88 | 0.605475 | false |
sippy/voiptests | test_cases/reinv_brkn2.py | 1 | 2000 | # Copyright (c) 2016 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.reinv_fail import a_test_reinv_fail
from test_cases.reinvite import b_test_reinvite
class a_test_reinv_brkn2(a_test_reinv_fail):
cld = 'bob_reinv_brkn2'
cli = 'alice_reinv_brkn2'
def reinvite(self, ua):
if not self.connect_done or self.disconnect_done:
return
sdp_body_bak = ua.lSDP
ua.lSDP = sdp_body_bak.getCopy()
for sect in ua.lSDP.content.sections:
sect.c_header = None
rval = a_test_reinv_fail.reinvite(self, ua)
ua.lSDP = sdp_body_bak
return rval
class b_test_reinv_brkn2(b_test_reinvite):
cli = 'bob_reinv_brkn2'
| bsd-2-clause | 1,664,678,858,462,998,500 | 43.444444 | 82 | 0.74 | false |
pycroscopy/pycroscopy | tests/io/test_hdf_writer.py | 1 | 36224 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import h5py
import numpy as np
import sys
sys.path.append("../../../pycroscopy/")
from pycroscopy.io.virtual_data import VirtualGroup, VirtualDataset
from pycroscopy.io.hdf_writer import HDFwriter
from pyUSID.io.hdf_utils import get_attr, get_h5_obj_refs # Until an elegant solution presents itself
class TestHDFWriter(unittest.TestCase):
@staticmethod
def __delete_existing_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def test_init_invalid_input(self):
with self.assertRaises(TypeError):
_ = HDFwriter(4)
def test_init_path_non_existant_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_path_existing_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_r_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r')
# hdf handle but of mode r
with self.assertRaises(TypeError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_init_h5_handle_r_plus_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r+')
# open h5 file handle or mode r+
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_w_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='w')
# open h5 file handle or mode w
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_closed(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file but closed
with self.assertRaises(ValueError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_simple_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dtype = np.uint16
dset_name = 'test'
data = np.random.randint(0, high=15, size=5, dtype=dtype)
microdset = VirtualDataset(dset_name, data)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
os.remove(file_path)
def test_simple_dset_write_success_more_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = data.dtype
compression = 'gzip'
chunking=(1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
os.remove(file_path)
def test_simple_dset_write_success_more_options_03(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = np.float16
compression = 'gzip'
chunking=(1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertTrue(np.all(h5_d[()] - data < 1E-3))
os.remove(file_path)
def test_empty_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
microdset = VirtualDataset(dset_name, None, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
# dtype is assigned automatically by h5py. Not to be tested here
os.remove(file_path)
def test_empty_dset_write_success_w_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
chunking = (1, 1024)
compression = 'gzip'
dtype = np.float16
microdset = VirtualDataset(dset_name, None, maxshape=maxshape,
dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
os.remove(file_path)
def test_expandable_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (None, 1024)
data = np.random.rand(1, 1024)
microdset = VirtualDataset(dset_name, data, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_resizeable_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.maxshape, maxshape)
self.assertTrue(np.allclose(h5_d[()], data))
# Now test to make sure that the dataset can be expanded:
# TODO: add this to the example!
expansion_axis = 0
h5_d.resize(h5_d.shape[expansion_axis] + 1, axis=expansion_axis)
self.assertEqual(h5_d.shape, (data.shape[0]+1, data.shape[1]))
self.assertEqual(h5_d.maxshape, maxshape)
# Finally try checking to see if this new data is also present in the file
new_data = np.random.rand(1024)
h5_d[1] = new_data
data = np.vstack((np.squeeze(data), new_data))
self.assertTrue(np.allclose(h5_d[()], data))
os.remove(file_path)
# TODO: will have to check to see if the parent is correctly declared for the group
def test_group_create_non_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name)
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test_'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name + '000')
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_root_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = ''
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
with self.assertRaises(ValueError):
_ = writer._create_group(h5_f, micro_group)
os.remove(file_path)
def test_group_create_indexed_nested_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
outer_grp_name = 'outer_'
micro_group = VirtualGroup(outer_grp_name)
writer = HDFwriter(h5_f)
h5_outer_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_outer_grp, h5py.Group)
self.assertEqual(h5_outer_grp.parent, h5_f)
self.assertEqual(h5_outer_grp.name, '/' + outer_grp_name + '000')
inner_grp_name = 'inner_'
micro_group = VirtualGroup(inner_grp_name)
h5_inner_grp = writer._create_group(h5_outer_grp, micro_group)
self.assertIsInstance(h5_inner_grp, h5py.Group)
self.assertEqual(h5_inner_grp.parent, h5_outer_grp)
self.assertEqual(h5_inner_grp.name, h5_outer_grp.name + '/' + inner_grp_name + '000')
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data_2nd_dim(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 3)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(None), slice(0, None, 2)),
'odd_rows': (slice(None), slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:, 0:None:2], data[:, 1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_one_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_generate_and_write_reg_ref_legal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': ['row_1', 'row_2']}
if sys.version_info.major == 3:
with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, attrs.copy())
else:
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels']) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[0], data[1]]
written_data = [h5_dset[h5_dset.attrs['row_1']], h5_dset[h5_dset.attrs['row_2']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(np.squeeze(exp), np.squeeze(act)))
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(3, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, {'labels': ['row_1', 'row_2']})
self.assertEqual(len(h5_dset.attrs), 0)
h5_f.flush()
os.remove(file_path)
    def test_generate_and_write_reg_ref_illegal_types(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, {'labels': [1, np.arange(3)]})
os.remove(file_path)
def test_write_illegal_reg_ref_too_many_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None), slice(None))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_illegal_reg_ref_too_few_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_reg_ref_slice_dim_larger_than_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, 15, 2), slice(None)),
'odd_rows': (slice(1, 15, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_illegal_reg_ref_not_slice_objs(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), 15),
'odd_rows': (slice(1, None, 2), 'hello')}}
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_simple_atts_reg_ref_to_dset(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
writer._write_dset_attributes(h5_dset, attrs.copy())
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_invalid_input(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(TypeError):
_ = writer.write(np.arange(5))
def test_write_dset_under_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data)
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_existing_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
h5_g = writer._create_group(h5_f, VirtualGroup('test_group'))
self.assertIsInstance(h5_g, h5py.Group)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data, parent='/test_group')
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
self.assertEqual(h5_dset.parent, h5_g)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_invalid_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(KeyError):
_ = writer.write(VirtualDataset('test', np.random.rand(5, 7), parent='/does_not_exist'))
os.remove(file_path)
def test_write_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[ret_val] = writer.write(micro_group)
self.assertIsInstance(ret_val, h5py.File)
self.assertEqual(h5_f, ret_val)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_f, key) == expected_val))
os.remove(file_path)
def test_write_single_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('Test_')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[h5_group] = writer.write(micro_group)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_group, key) == expected_val))
os.remove(file_path)
def test_group_indexing_sequential(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
[h5_group_0] = writer.write(micro_group_0)
_ = writer.write(VirtualGroup('blah'))
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
[h5_group_1] = writer.write(micro_group_1)
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_group_indexing_simultaneous(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
            micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
root_group = VirtualGroup('', children=[VirtualGroup('blah'), micro_group_0,
VirtualGroup('meh'), micro_group_1])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(root_group)
[h5_group_1] = get_h5_obj_refs(['Test_001'], h5_refs_list)
[h5_group_0] = get_h5_obj_refs(['Test_000'], h5_refs_list)
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_write_simple_tree(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
inner_dset_data = np.random.rand(5, 7)
inner_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
inner_dset = VirtualDataset('inner_dset', inner_dset_data)
inner_dset.attrs = inner_dset_attrs.copy()
attrs_inner_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
inner_group = VirtualGroup('indexed_inner_group_')
inner_group.attrs = attrs_inner_grp
inner_group.add_children(inner_dset)
outer_dset_data = np.random.rand(5, 7)
outer_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
outer_dset = VirtualDataset('test', outer_dset_data, parent='/test_group')
outer_dset.attrs = outer_dset_attrs.copy()
attrs_outer_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
outer_group = VirtualGroup('unindexed_outer_group')
outer_group.attrs = attrs_outer_grp
outer_group.add_children([inner_group, outer_dset])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(outer_group)
# I don't know of a more elegant way to do this:
[h5_outer_dset] = get_h5_obj_refs([outer_dset.name], h5_refs_list)
[h5_inner_dset] = get_h5_obj_refs([inner_dset.name], h5_refs_list)
[h5_outer_group] = get_h5_obj_refs([outer_group.name], h5_refs_list)
[h5_inner_group] = get_h5_obj_refs(['indexed_inner_group_000'], h5_refs_list)
self.assertIsInstance(h5_outer_dset, h5py.Dataset)
self.assertIsInstance(h5_inner_dset, h5py.Dataset)
self.assertIsInstance(h5_outer_group, h5py.Group)
self.assertIsInstance(h5_inner_group, h5py.Group)
# check assertions for the inner dataset first
self.assertEqual(h5_inner_dset.parent, h5_inner_group)
reg_ref = inner_dset_attrs.pop('labels')
self.assertEqual(len(h5_inner_dset.attrs), len(inner_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in inner_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_inner_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_inner_dset, 'labels')]))
expected_data = [inner_dset_data[:None:2], inner_dset_data[1:None:2]]
written_data = [h5_inner_dset[h5_inner_dset.attrs['even_rows']], h5_inner_dset[h5_inner_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# check assertions for the inner data group next:
self.assertEqual(h5_inner_group.parent, h5_outer_group)
for key, expected_val in attrs_inner_grp.items():
self.assertTrue(np.all(get_attr(h5_inner_group, key) == expected_val))
# check the outer dataset next:
self.assertEqual(h5_outer_dset.parent, h5_outer_group)
reg_ref = outer_dset_attrs.pop('labels')
self.assertEqual(len(h5_outer_dset.attrs), len(outer_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in outer_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_outer_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_outer_dset, 'labels')]))
expected_data = [outer_dset_data[:None:2], outer_dset_data[1:None:2]]
written_data = [h5_outer_dset[h5_outer_dset.attrs['even_rows']],
h5_outer_dset[h5_outer_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# Finally check the outer group:
self.assertEqual(h5_outer_group.parent, h5_f)
for key, expected_val in attrs_outer_grp.items():
self.assertTrue(np.all(get_attr(h5_outer_group, key) == expected_val))
os.remove(file_path)
if __name__ == '__main__':
unittest.main()
| mit | 1,623,074,380,360,890,400 | 39.026519 | 124 | 0.552203 | false |
csmart/jockey-yum | setup.py | 1 | 1204 | #!/usr/bin/env python
# (c) 2007 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
# This script needs python-distutils-extra, an extension to the standard
# distutils which provides i18n, icon support, etc.
# https://launchpad.net/python-distutils-extra
from glob import glob
from distutils.version import StrictVersion
try:
import DistUtilsExtra.auto
except ImportError:
import sys
print >> sys.stderr, 'To build Jockey you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert StrictVersion(DistUtilsExtra.auto.__version__) >= '2.4', 'needs DistUtilsExtra.auto >= 2.4'
DistUtilsExtra.auto.setup(
name='jockey',
version='0.9.3',
description='UI for managing third-party and non-free drivers',
url='https://launchpad.net/jockey',
license='GPL v2 or later',
author='Martin Pitt',
author_email='martin.pitt@ubuntu.com',
data_files = [
('share/jockey', ['backend/jockey-backend']),
('share/jockey', ['gtk/jockey-gtk.ui']), # bug in DistUtilsExtra.auto 2.2
('share/jockey', glob('kde/*.ui')), # don't use pykdeuic4
],
scripts = ['gtk/jockey-gtk', 'kde/jockey-kde', 'text/jockey-text'],
)
| gpl-2.0 | 4,649,148,657,068,648,000 | 31.540541 | 98 | 0.680233 | false |
gersolar/stations | stations_configuration/settings.py | 1 | 5198 | # Only Celery settings for stations project.
#import djcelery
#djcelery.setup_loader()
#BROKER_TRANSPORT = 'amqplib'
#BROKER_URL = 'django://'
##CELERY_RESULT_BACKEND = 'database'
#CELERY_DEFAULT_QUEUE = "default"
#CELERY_QUEUES = {
# "default": {
# "binding_key": "task.#",
# },
# "mailer": {
# "binding_key": "task.#",
# },
#}
#CELERY_ROUTES = {'downloader.tasks.check_email_schedule': {'queue': 'mailer'}}
#CELERY_TIMEZONE = 'UTC'
#CELERY_CONCURRENCY = 7
# Django settings for stations project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'stations.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC' # 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fax%_3d9oshwed$!3s)jdn876jpj#5u&50m$6naau#&=zpyn%0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stations_configuration.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'stations_configuration.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'stations',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_DIRS = ('templates',)
| mit | 4,052,354,373,346,144,000 | 28.039106 | 85 | 0.729127 | false |
lizardsystem/lizard-measure | lizard_measure/migrations/0010_auto__del_score__del_measuringrod__del_field_measurestatusmoment_is_pl.py | 1 | 23606 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Score'
db.delete_table('lizard_measure_score')
# Deleting model 'MeasuringRod'
db.delete_table('lizard_measure_measuringrod')
# Deleting field 'MeasureStatusMoment.is_planning'
db.delete_column('lizard_measure_measurestatusmoment', 'is_planning')
# Deleting field 'MeasureStatusMoment.date'
db.delete_column('lizard_measure_measurestatusmoment', 'date')
# Adding field 'MeasureStatusMoment.planning_date'
db.add_column('lizard_measure_measurestatusmoment', 'planning_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'MeasureStatusMoment.realisation_date'
db.add_column('lizard_measure_measurestatusmoment', 'realisation_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.total_costs'
db.delete_column('lizard_measure_measure', 'total_costs')
# Adding field 'Measure.valid'
db.add_column('lizard_measure_measure', 'valid', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False)
# Adding field 'Measure.geom'
db.add_column('lizard_measure_measure', 'geom', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Score'
db.create_table('lizard_measure_score', (
('gep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('area_ident', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('ascending', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('mep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_measure.MeasuringRod'])),
('limit_bad_insufficient', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_area.Area'], null=True, blank=True)),
('target_2027', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('limit_insufficient_moderate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('target_2015', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['Score'])
# Adding model 'MeasuringRod'
db.create_table('lizard_measure_measuringrod', (
('group', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sign', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sub_measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('unit', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['MeasuringRod'])
# Adding field 'MeasureStatusMoment.is_planning'
db.add_column('lizard_measure_measurestatusmoment', 'is_planning', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'MeasureStatusMoment.date'
db.add_column('lizard_measure_measurestatusmoment', 'date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'MeasureStatusMoment.planning_date'
db.delete_column('lizard_measure_measurestatusmoment', 'planning_date')
# Deleting field 'MeasureStatusMoment.realisation_date'
db.delete_column('lizard_measure_measurestatusmoment', 'realisation_date')
# Adding field 'Measure.total_costs'
db.add_column('lizard_measure_measure', 'total_costs', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.valid'
db.delete_column('lizard_measure_measure', 'valid')
# Deleting field 'Measure.geom'
db.delete_column('lizard_measure_measure', 'geom')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.fundingorganization': {
'Meta': {'object_name': 'FundingOrganization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Organization']"}),
'percentage': ('django.db.models.fields.FloatField', [], {})
},
'lizard_measure.krwstatus': {
'Meta': {'object_name': 'KRWStatus'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.krwwatertype': {
'Meta': {'object_name': 'KRWWatertype'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measure': {
'Meta': {'ordering': "('id',)", 'object_name': 'Measure'},
'aggregation_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'area_measure_set'", 'blank': 'True', 'to': "orm['lizard_area.Area']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'datetime_in_source': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'executive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'executive_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'exploitation_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'funding_organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Organization']", 'through': "orm['lizard_measure.FundingOrganization']", 'symmetrical': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObject']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'import_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'import_source': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'initiator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initiator_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'investment_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_KRW_measure': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_indicator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'measure_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureType']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']", 'null': 'True', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasurePeriod']", 'null': 'True', 'blank': 'True'}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible_department': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status_moments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureStatus']", 'through': "orm['lizard_measure.MeasureStatusMoment']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Unit']"}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'waterbodies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.WaterBody']", 'symmetrical': 'False', 'blank': 'True'})
},
'lizard_measure.measurecategory': {
'Meta': {'object_name': 'MeasureCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measureperiod': {
'Meta': {'ordering': "('start_date', 'end_date')", 'object_name': 'MeasurePeriod'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measurestatus': {
'Meta': {'ordering': "('-value',)", 'object_name': 'MeasureStatus'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_measure.measurestatusmoment': {
'Meta': {'ordering': "('measure__id', 'status__value')", 'object_name': 'MeasureStatusMoment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exploitation_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investment_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'planning_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'realisation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureStatus']"})
},
'lizard_measure.measuretype': {
'Meta': {'ordering': "('code',)", 'object_name': 'MeasureType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'combined_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'harmonisation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'subcategory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Unit']", 'symmetrical': 'False', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.organization': {
'Meta': {'ordering': "('description',)", 'unique_together': "(('source', 'code'),)", 'object_name': 'Organization'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.steeringparameter': {
'Meta': {'object_name': 'SteeringParameter'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'fews_parameter': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_maximum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'target_minimum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.unit': {
'Meta': {'object_name': 'Unit'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'conversion_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.waterbody': {
'Meta': {'object_name': 'WaterBody'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'area_ident': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'krw_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWStatus']", 'null': 'True', 'blank': 'True'}),
'krw_watertype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWWatertype']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_measure']
| gpl-3.0 | 1,789,020,140,202,263,800 | 77.949833 | 219 | 0.57604 | false |
konrad/kufpybio | kufpybiotools/generate_igr_gff.py | 1 | 1881 | #!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
| isc | -7,492,041,156,369,239,000 | 30.35 | 70 | 0.61563 | false |
jakobmoss/tsa | utils/makeweights.py | 1 | 2350 | # -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weigts from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Path to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt(dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
    if tdif.any():
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
    outfile = datname + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
| mit | 56,344,320,398,218,080 | 28.3625 | 79 | 0.43593 | false |
erstis-go-botting/sexy-bot | misc.py | 1 | 1888 | import os
#Checks whether settings.ini needs to be generated. If universe, username and password are not all given, it generates a settings.ini with the default account.
#The settings_generator will only accept universe 82 if the flag argument is given as True (to make sure that universe 82 is intended).
def settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if (os.path.isfile('settings/settings.ini')):
print("settings file found, stopping now.")
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
if not (os.path.isdir('settings')):
        os.makedirs('settings')
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
def force_settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if not (os.path.isfile('settings/settings.ini')):
settings_generator(universe, username, password, flag)
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
#settings_generator()
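# Usage sketch (hypothetical account values, not part of the original module):
#   settings_generator(universe=83, username='someBot', password='somePassword')
#   force_settings_generator(universe=82, username='someBot', password='somePassword', flag=True)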
| mit | 7,923,967,946,050,228,000 | 50.027027 | 146 | 0.678496 | false |
ActiveState/code | recipes/Python/275366_Email_address_leech/recipe-275366.py | 1 | 1624 | import re
def test():
text = \
''' You can contact us at myname@server.site.com
or at yourname AT server DOT site DOT com.
Also at o u r n a m e @ s e r v e r dot s i t e dot c o m
and t.h.e.i.r.n.a.m.e at server dot s/i/t/e DOT COM.
'''
for email in emailLeech(text): print email
DOMAINS = ["com","edu","net","org","gov","us"] #.. and so on
FLAGS = re.IGNORECASE | re.VERBOSE
AT = r'(?: @ | \b A \s* T \b)'
ADDRESSPART = r'\b (?: \w+ | \w (?:(?:\s+|\W) \w)*) \b'
DOMAIN = r'(?:%s)' % '|'.join(["(?:\s*|\W)".join(domain) for domain in DOMAINS])
NONWORD = re.compile(r'\W+')
DOT_REGEX = re.compile(r'(?: \. | \b D \s* O \s* T \b)', FLAGS)
EMAIL_REGEX = re.compile(
(r'(?P<name>%s) \W* %s \W*' % (ADDRESSPART,AT)) +
r'(?P<site>(?: %s \W* %s \W*)+)' % (ADDRESSPART, DOT_REGEX.pattern) +
r'(?P<domain>%s)' % DOMAIN, FLAGS)
def emailLeech(text):
''' An iterator over recognized email addresses within text'''
while (True):
match = EMAIL_REGEX.search(text)
if not match: break
parts = [match.group("name")] + \
DOT_REGEX.split(match.group("site")) + \
[match.group("domain")]
# discard non word chars
parts = [NONWORD.sub('',part) for part in parts]
# discard all empty parts and make lowercase
parts = [part.lower() for part in parts if len(part)>0]
# join the parts
yield "%s@%s.%s" % (parts[0], '.'.join(parts[1:-1]), parts[-1])
text = text[match.end():]
if __name__ == '__main__': test()
| mit | 6,945,036,452,348,633,000 | 35.088889 | 80 | 0.513547 | false |
mosdef-hub/foyer | foyer/tests/test_forcefield_parameters.py | 1 | 10029 | import numpy as np
import pytest
from foyer import Forcefield, forcefields
from foyer.exceptions import MissingForceError, MissingParametersError
from foyer.forcefield import get_available_forcefield_loaders
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
@pytest.mark.skipif(
condition="load_GAFF"
not in map(lambda func: func.__name__, get_available_forcefield_loaders()),
reason="GAFF Plugin is not installed",
)
class TestForcefieldParameters(BaseTest):
@pytest.fixture(scope="session")
def gaff(self):
return forcefields.load_GAFF()
def test_gaff_missing_group(self, gaff):
with pytest.raises(ValueError):
gaff.get_parameters("missing", key=[])
def test_gaff_non_string_keys(self, gaff):
with pytest.raises(TypeError):
gaff.get_parameters("atoms", key=1)
def test_gaff_bond_parameters_gaff(self, gaff):
bond_params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
assert np.isclose(bond_params["length"], 0.19079)
assert np.isclose(bond_params["k"], 219827.36)
def test_gaff_bond_params_reversed(self, gaff):
assert gaff.get_parameters(
"harmonic_bonds", ["ca", "br"]
) == gaff.get_parameters("harmonic_bonds", ["ca", "br"])
def test_gaff_missing_bond_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_bonds", ["str1", "str2"])
def test_gaff_angle_parameters(self, gaff):
angle_params = gaff.get_parameters("harmonic_angles", ["f", "c1", "f"])
assert np.allclose(
[angle_params["theta"], angle_params["k"]],
[3.141592653589793, 487.0176],
)
def test_gaff_angle_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"harmonic_angles", ["f", "c2", "ha"]
).values()
),
list(
gaff.get_parameters(
"harmonic_angles", ["ha", "c2", "f"]
).values()
),
)
def test_gaff_missing_angle_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_angles", ["1", "2", "3"])
def test_gaff_periodic_proper_parameters(self, gaff):
periodic_proper_params = gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
)
assert np.allclose(periodic_proper_params["periodicity"], [2.0, 1.0])
assert np.allclose(
periodic_proper_params["k"], [9.414, 5.4392000000000005]
)
assert np.allclose(
periodic_proper_params["phase"],
[3.141592653589793, 3.141592653589793],
)
def test_gaff_periodic_proper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
).values()
),
list(
gaff.get_parameters(
"periodic_propers", ["hs", "sh", "c", "c3"]
).values()
),
)
def test_gaff_periodic_improper_parameters(self, gaff):
periodic_improper_params = gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
)
assert np.allclose(periodic_improper_params["periodicity"], [2.0])
assert np.allclose(periodic_improper_params["k"], [4.6024])
assert np.allclose(
periodic_improper_params["phase"], [3.141592653589793]
)
def test_gaff_periodic_improper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
).values()
),
list(
gaff.get_parameters(
"periodic_impropers", ["c", "o", "", "o"]
).values()
),
)
def test_gaff_proper_params_missing(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("periodic_impropers", ["a", "b", "c", "d"])
def test_gaff_scaling_factors(self, gaff):
assert gaff.lj14scale == 0.5
assert np.isclose(gaff.coulomb14scale, 0.833333333)
def test_opls_get_parameters_atoms(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", "opls_145")
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atoms_list(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", ["opls_145"])
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atom_class(self, oplsaa):
atom_params = oplsaa.get_parameters(
"atoms", "CA", keys_are_atom_classes=True
)
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_bonds(self, oplsaa):
bond_params = oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
)
assert bond_params["length"] == 0.146
assert bond_params["k"] == 334720.0
def test_opls_get_parameters_bonds_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_145", "opls_760"]
).values()
),
)
def test_opls_get_parameters_bonds_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["C_2", "O_2"], True
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["O_2", "C_2"], True
).values()
),
)
def test_opls_get_parameters_angle(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.0943950239, 585.76]
)
def test_opls_get_parameters_angle_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_167", "opls_772", "opls_166"]
).values()
),
)
def test_opls_get_parameters_angle_atom_classes(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["CA", "C_2", "CA"], keys_are_atom_classes=True
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.09439510239, 711.28]
)
def test_opls_get_parameters_angle_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles",
["CA", "C", "O"],
keys_are_atom_classes=True,
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles",
["O", "C", "CA"],
keys_are_atom_classes=True,
).values()
),
)
def test_opls_get_parameters_rb_proper(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["opls_215", "opls_215", "opls_235", "opls_269"]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[2.28446, 0.0, -2.28446, 0.0, 0.0, 0.0],
)
def test_get_parameters_rb_proper_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"rb_propers",
["opls_215", "opls_215", "opls_235", "opls_269"],
).values()
),
list(
oplsaa.get_parameters(
"rb_propers",
["opls_269", "opls_235", "opls_215", "opls_215"],
).values()
),
)
def test_opls_get_parameters_wildcard(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["", "opls_235", "opls_544", ""]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[30.334, 0.0, -30.334, 0.0, 0.0, 0.0],
)
def test_opls_missing_force(self, oplsaa):
with pytest.raises(MissingForceError):
oplsaa.get_parameters("periodic_propers", key=["a", "b", "c", "d"])
def test_opls_scaling_factors(self, oplsaa):
assert oplsaa.lj14scale == 0.5
assert oplsaa.coulomb14scale == 0.5
def test_missing_scaling_factors(self):
ff = Forcefield(forcefield_files=(get_fn("validate_customtypes.xml")))
with pytest.raises(AttributeError):
assert ff.lj14scale
with pytest.raises(AttributeError):
assert ff.coulomb14scale
| mit | 3,671,707,264,193,672,000 | 33.582759 | 79 | 0.5172 | false |
ecell/ecell3 | ecell/pyecell/ecell/analysis/PathwayProxy.py | 1 | 13263 | #!/usr/bin/env python
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
"""
A program for handling and defining a pathway.
This program is the extension package for E-Cell System Version 3.
"""
__program__ = 'PathwayProxy'
__version__ = '1.0'
__author__ = 'Kazunari Kaizu <kaizu@sfc.keio.ac.jp>'
__copyright__ = ''
__license__ = ''
import ecell.eml
from ecell.ecssupport import *
from ecell.analysis.util import createVariableReferenceFullID
import copy
import numpy
class PathwayProxy:
def __init__( self, anEmlSupport, processList=None ):
'''
anEmlSupport: Eml support object
processList: (list) a list of process full path
'''
self.theEmlSupport = anEmlSupport
if processList:
self.setProcessList( processList )
else:
self.setProcessList( [] )
# end of __init__
def setProcessList( self, processList ):
'''
set and detect a pathway
processList: (list) a list of process full ID
'''
# check the existence of processes,
# and create relatedVariableList
self.__processList = []
self.__variableList = []
for processFullID in processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
self.__processList.append( processFullID )
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__processList.sort()
self.__variableList.sort()
# end of setProcessList
def getProcessList( self ):
'''
return processList
'''
return copy.copy( self.__processList )
# end of getProcessList
def addProcess( self, processFullID ):
'''
add a process to the pathway
processFullID: (str) a process full ID
'''
if not self.__processList.count( processFullID ) == 0:
return
# elif not ecell.eml.Eml.isEntityExist( processFullID ):
# return
# add process
self.__processList.append( processFullID )
self.__processList.sort()
# update the related variable list
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
return
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__variableList.sort()
# end of addProcess
def removeProcess( self, processIndexList ):
'''
remove processes from the pathway
processIndexList: (list) a list of indices of processes
'''
indexList = copy.copy( processIndexList )
indexList.sort()
indexList.reverse()
removedProcessList = []
for i in indexList:
if len( self.__processList ) > i:
removedProcessList.append( self.__processList.pop( i ) )
removedVariableList = []
for processFullID in removedProcessList:
# if not ecell.eml.Eml.isEntityExist( self.theEmlSupport, processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if removedVariableList.count( fullIDString ) == 0:
removedVariableList.append( fullIDString )
for processFullID in self.__processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if not removedVariableList.count( fullIDString ) == 0:
removedVariableList.remove( fullIDString )
for variableFullID in removedVariableList:
self.__variableList.remove( variableFullID )
# end of removeProcess
def take( self, processIndexList ):
'''
create and return a sub-pathway
processIndexList: (list) a list of indices of processes
return PathwayProxy
'''
processList = []
for i in processIndexList:
if len( self.__processList ) > i:
processList.append( self.__processList[ i ] )
subPathway = PathwayProxy( self.theEmlSupport, processList )
return subPathway
# end of removeProcess
def getVariableList( self ):
'''
return relatedVariableList
'''
return copy.copy( self.__variableList )
# end of getVariableList
def removeVariable( self, variableIndexList ):
'''
remove variables from the pathway
variableIndexList: (list) a list of indices of variables
'''
indexList = copy.copy( variableIndexList )
indexList.sort()
indexList.reverse()
for i in indexList:
if len( self.__variableList ) > i:
self.__variableList.pop( i )
# end of removeVariable
def addVariable( self, variableFullID ):
'''
recover a removed variable to the pathway
variableFullID: (str) a variable full ID
'''
if not self.__variableList.count( variableFullID ) == 0:
return 1
# elif not ecell.eml.Eml.isEntityExist( variableFullID ):
# return 0
for processFullID in self.__processList:
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = fullID[ 1 ] + ':' + fullID[ 2 ]
if fullIDString == variableFullID:
self.__variableList.append( variableFullID )
self.__variableList.sort()
return 1
return 0
# end of addProcess
def getIncidentMatrix( self, mode=0 ):
'''
create the incident matrix (array)
mode: (0 or 1) 0 means that only the \'write\' variables are checked. 0 is set as default.
return incidentMatrix
'''
incidentMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ) )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
# should some warning message be showed?
continue
if mode:
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
incidentMatrix[ i ][ j ] = 1
else:
incidentMatrix[ i ][ j ] = 1
return incidentMatrix
# end of getIncidentMatrix
def getStoichiometryMatrix( self ):
'''
create the stoichiometry matrix (array)
return stoichiometryMatrix
'''
stoichiometryMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ), float )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
# should some warning message be showed?
continue
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
stoichiometryMatrix[ i ][ j ] += coeff
return stoichiometryMatrix
# end of getStoichiometryMatrix
def getReversibilityList( self ):
'''
check and return the reversibilities (isReversible) for processes
default value is 0, irreversible
return reversibilityList
'''
reversibilityList = []
for processFullID in self.__processList:
propertyList = self.theEmlSupport.getEntityPropertyList( processFullID )
if propertyList.count( 'isReversible' ) != 0:
# isReversible is handled as float
isReversible = float( self.theEmlSupport.getEntityProperty( processFullID + ':isReversible' )[ 0 ] )
reversibilityList.append( int( isReversible ) )
else:
# default value, irreversible
reversibilityList.append( 0 )
return reversibilityList
# end of getReversibilityList
# end of PathwayProxy
if __name__ == '__main__':
from emlsupport import EmlSupport
import sys
import os
def main( filename ):
anEmlSupport = EmlSupport( filename )
pathwayProxy = anEmlSupport.createPathwayProxy()
print 'process list ='
print pathwayProxy.getProcessList()
print 'related variable list ='
print pathwayProxy.getVariableList()
print 'incident matrix ='
print pathwayProxy.getIncidentMatrix()
print 'stoichiometry matrix ='
print pathwayProxy.getStoichiometryMatrix()
print 'reversibility list ='
print pathwayProxy.getReversibilityList()
# end of main
if len( sys.argv ) > 1:
main( sys.argv[ 1 ] )
else:
filename = '../../../../doc/samples/Heinrich/Heinrich.eml'
main( os.path.abspath( filename ) )
| lgpl-3.0 | 1,427,795,092,905,206,500 | 30.133803 | 121 | 0.586594 | false |
Syralist/pixels_clock | clock.py | 1 | 3227 | # -*- coding: utf-8 -*-
import pygame, led, sys, os, random, csv
import smbus
from pygame.locals import *
from led.PixelEventHandler import *
from time import gmtime, strftime
""" A very simple arcade shooter demo :)
"""
random.seed()
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255, 255, 255)
RED = pygame.Color(255, 0, 0)
GREEN = pygame.Color(0, 255, 0)
adress = 0x48
LM75 = smbus.SMBus(1)
# detect if a serial/USB port is given as argument
hasSerialPortParameter = ( sys.argv.__len__() > 1 )
# use 90 x 20 matrix when no usb port for real display provided
fallbackSize = ( 90, 20 )
if hasSerialPortParameter:
serialPort = sys.argv[1]
print "INITIALIZING WITH USB-PORT: " + serialPort
ledDisplay = led.teensy.TeensyDisplay(serialPort, fallbackSize)
else:
print "INITIALIZING WITH SERVER DISPLAY AND SIMULATOR."
ledDisplay = led.dsclient.DisplayServerClientDisplay('localhost', 8123, fallbackSize)
# use same size for sim and real LED panel
size = ledDisplay.size()
simDisplay = led.sim.SimDisplay(size)
screen = pygame.Surface(size)
gamestate = 0 #1=alive; 0=dead
def main():
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
pygame.joystick.init()
gameover = False
# Initialize first joystick
if pygame.joystick.get_count() > 0:
stick = pygame.joystick.Joystick(0)
stick.init()
global gamestate
scored = False
# Clear event list before starting the game
pygame.event.clear()
while not gameover:
# Process event queue
for pgevent in pygame.event.get():
if pgevent.type == QUIT:
pygame.quit()
sys.exit()
event = process_event(pgevent)
# End the game
if event.button == EXIT:
gameover = True
# Keypresses on keyboard and joystick axis motions / button presses
elif event.type == PUSH:
# Movements
if event.button == UP:
pass
elif event.button == DOWN:
pass
elif event.button == RIGHT:
pass
elif event.button == LEFT:
pass
                # B2 button (currently unused)
elif event.button == B2:
pass
                # P1 button quits the clock
elif event.button == P1:
gameover = True
# Only on Keyboard
elif pgevent.type == KEYDOWN and pgevent.key == K_ESCAPE:
gameover = True
screen.fill(BLACK)
font = pygame.font.SysFont("Arial", 12)
text1 = font.render(strftime("%H:%M:%S"), 0, RED)
text1pos = text1.get_rect()
text1pos.midtop = (screen.get_rect().centerx, -1)
screen.blit(text1,text1pos)
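        # Read the LM75 temperature sensor over I2C via smbus; fall back to -1 if the read fails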
try:
temp = LM75.read_byte(adress)
except:
temp = -1
text2 = font.render("T: "+str(temp)+"'C", 0, GREEN)
text2pos = text2.get_rect()
text2pos.midbottom = (screen.get_rect().centerx, 23)
screen.blit(text2,text2pos)
simDisplay.update(screen)
ledDisplay.update(screen)
clock.tick(10)
main()
| gpl-3.0 | 2,032,024,441,789,479,700 | 26.117647 | 89 | 0.578866 | false |
maverickYQB/mqtt_zway | test/test_main_class.py | 1 | 2397 | #!/usr/bin/env python
'''
Created on March 20 2016
@author: popotvin
'''
import mqtt_zway_test
import mqtt_zway
import paho.mqtt.client as mqtt
import time
import traceback
date_time = mqtt_zway_test.date_time
# Main variables
mqtt_old_payload = []
mqtt_new_payload = []
payload = {}
publish_string = ""
# MQTT config
outgoing_topic = mqtt_zway_test.outgoing_topic
ongoing_topic = mqtt_zway_test.ongoing_topic
mqtt_ip = mqtt_zway_test.mqtt_ip
mqtt_port = mqtt_zway_test.mqtt_port
mqtt_client = mqtt_zway_test.mqtt_client
# ZWAY config
zway_ip = mqtt_zway_test.zway_ip
zway_port = mqtt_zway_test.zway_port
# list of connected devices on the zway server (device_id, device type, device level value)
zway_devList = mqtt_zway.zway_devList(zway_ip,zway_port)
# MQTT Client init
mqttc = mqtt.Client(str(mqtt_client))
mqttc.on_subscribe = mqtt_zway_test.on_subscribe
mqttc.on_message = mqtt_zway_test.on_message
mqttc.on_connect = mqtt_zway_test.on_connect
mqttc.connect(mqtt_ip, mqtt_port)
# Test zway and MQTT servers
zway_test = mqtt_zway.server_test(zway_ip, zway_port)
mqtt_test = mqtt_zway.server_test(mqtt_ip, mqtt_port)
# Main loop
if zway_test and mqtt_test:
print "ZWAY is running at: %s"% str(date_time)
print "MQTT is running at: %s"% str(date_time)
while True:
try:
mqttc.loop()
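            # Walk every device reported by the Z-Way server and build one payload dict per device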
for key, value in zway_devList.dev_dict().iteritems():
for i,j in value.iteritems():
if i == "id":
dev_id = j
elif i == "type":
dev_type = j
zway_devList.dev_get(dev_id, dev_type)
payload["device_id"] = str(dev_id)
payload["type"] = str(dev_type)
payload["value"] = zway_devList.dev_value(dev_id, dev_type)
mqtt_new_payload.append(dict(payload))
time.sleep(0.1)
if mqtt_old_payload != mqtt_new_payload:
mqttc.publish(outgoing_topic, str(mqtt_new_payload))
#print "published to mQTT: %s" % mqtt_new_payload
mqtt_old_payload = mqtt_new_payload
mqtt_new_payload = []
time.sleep(0.5)
except Exception, e:
print traceback.print_exc()
break
elif not zway_test:
print "ZWAY server is offline"
elif not mqtt_test:
print "MQTT server is Offline"
| gpl-3.0 | 1,230,353,802,321,709 | 28.9625 | 91 | 0.619942 | false |
tylerclair/py3canvas | py3canvas/apis/modules.py | 1 | 54047 | """Modules API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ModulesAPI(BaseCanvasAPI):
"""Modules API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ModulesAPI."""
super(ModulesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ModulesAPI")
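    # Usage sketch (assumes an already-constructed ModulesAPI instance `api`; the exact
    # constructor arguments come from BaseCanvasAPI and are not shown here):
    #   modules = api.list_modules(course_id=123, include=['items'])
    #   item = api.show_module_item(789, course_id=123, module_id=456)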
def list_modules(self, course_id, include=None, search_term=None, student_id=None):
"""
List modules.
List the modules in a course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial name of the modules (and module items, if include['items'] is
specified) to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, all_pages=True)
def show_module(self, id, course_id, include=None, student_id=None):
"""
Show module.
Get information about a single module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Create a module.
Create and return a new module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - module[name]
"""The name of the module"""
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of this module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked.
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True)
def update_module(self, id, course_id, module_name=None, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_published=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Update a module.
Update and return an existing module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module[name]
"""The name of the module"""
if module_name is not None:
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of the module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
# OPTIONAL - module[published]
"""Whether the module is published and visible to students"""
if module_published is not None:
data["module[published]"] = module_published
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def delete_module(self, id, course_id):
"""
Delete module.
Delete a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def re_lock_module_progressions(self, id, course_id):
"""
Re-lock module progressions.
Resets module progressions to their default locked state and
recalculates them based on the current requirements.
Adding progression requirements to an active course will not lock students
out of modules they have already unlocked unless this action is called.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id}/relock with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}/relock".format(**path), data=data, params=params, single_item=True)
def list_module_items(self, course_id, module_id, include=None, search_term=None, student_id=None):
"""
List module items.
List the items in a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with each item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the items to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, all_pages=True)
def show_module_item(self, id, course_id, module_id, include=None, student_id=None):
"""
Show module item.
Get information about a single module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with this item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def create_module_item(self, course_id, module_id, module_item_type, module_item_content_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_new_tab=None, module_item_page_url=None, module_item_position=None, module_item_title=None):
"""
Create a module item.
Create and return a new module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - module_item[title]
"""The name of the module item and associated content"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# REQUIRED - module_item[type]
"""The type of content linked to the item"""
self._validate_enum(module_item_type, ["File", "Page", "Discussion", "Assignment", "Quiz", "SubHeader", "ExternalUrl", "ExternalTool"])
data["module_item[type]"] = module_item_type
# REQUIRED - module_item[content_id]
"""The id of the content to link to the module item. Required, except for
'ExternalUrl', 'Page', and 'SubHeader' types."""
data["module_item[content_id]"] = module_item_content_id
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)."""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[page_url]
"""Suffix for the linked wiki page (e.g. 'front-page'). Required for 'Page'
type."""
if module_item_page_url is not None:
data["module_item[page_url]"] = module_item_page_url
# OPTIONAL - module_item[external_url]
"""External url that the item points to. [Required for 'ExternalUrl' and
'ExternalTool' types."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete. Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, single_item=True)
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module_item[title]
"""The name of the module item"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)"""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[external_url]
"""External url that the item points to. Only applies to 'ExternalUrl' type."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete, Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
# OPTIONAL - module_item[published]
"""Whether the module item is published and visible to students."""
if module_item_published is not None:
data["module_item[published]"] = module_item_published
# OPTIONAL - module_item[module_id]
"""Move this item to another module by specifying the target module id here.
The target module must be in the same course."""
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True)
def delete_module_item(self, id, course_id, module_id):
"""
Delete module item.
Delete a module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def mark_module_item_as_done_not_done(self, id, course_id, module_id):
"""
Mark module item as done/not done.
Mark a module item as done/not done. Use HTTP method PUT to mark as done,
and DELETE to mark as not done.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done".format(**path), data=data, params=params, no_data=True)
def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
"""
Get module item sequence.
Given an asset in a course, find the ModuleItem it belongs to, and also the previous and next Module Items
in the course sequence.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - asset_type
"""The type of asset to find module sequence information for. Use the ModuleItem if it is known
(e.g., the user navigated from a module item), since this will avoid ambiguity if the asset
appears more than once in the module sequence."""
if asset_type is not None:
self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
params["asset_type"] = asset_type
# OPTIONAL - asset_id
"""The id of the asset (or the url in the case of a Page)"""
if asset_id is not None:
params["asset_id"] = asset_id
self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True)
def mark_module_item_read(self, id, course_id, module_id):
"""
Mark module item read.
Fulfills "must view" requirement for a module item. It is generally not necessary to do this explicitly,
but it is provided for applications that need to access external content directly (bypassing the html_url
redirect that normally allows Canvas to fulfill "must view" requirements).
This endpoint cannot be used to complete requirements on locked or unpublished module items.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read".format(**path), data=data, params=params, no_data=True)
class Contentdetails(BaseModel):
"""Contentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Contentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Contentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
class Moduleitemsequenceasset(BaseModel):
"""Moduleitemsequenceasset Model."""
def __init__(self, module_id=None, type=None, id=None, title=None):
"""Init method for Moduleitemsequenceasset class."""
self._module_id = module_id
self._type = type
self._id = id
self._title = title
self.logger = logging.getLogger('py3canvas.Moduleitemsequenceasset')
@property
def module_id(self):
"""module_id."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""id."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def title(self):
"""title."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
class Moduleitemcompletionrequirement(BaseModel):
"""Moduleitemcompletionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Moduleitemcompletionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Moduleitemcompletionrequirement')
@property
def min_score(self):
"""min_score."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""completed."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Module(BaseModel):
"""Module Model."""
def __init__(self, completed_at=None, items_count=None, unlock_at=None, workflow_state=None, items=None, prerequisite_module_ids=None, state=None, publish_final_grade=None, position=None, items_url=None, id=None, require_sequential_progress=None, name=None):
"""Init method for Module class."""
self._completed_at = completed_at
self._items_count = items_count
self._unlock_at = unlock_at
self._workflow_state = workflow_state
self._items = items
self._prerequisite_module_ids = prerequisite_module_ids
self._state = state
self._publish_final_grade = publish_final_grade
self._position = position
self._items_url = items_url
self._id = id
self._require_sequential_progress = require_sequential_progress
self._name = name
self.logger = logging.getLogger('py3canvas.Module')
@property
def completed_at(self):
"""the date the calling user completed the module (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed_at
@completed_at.setter
def completed_at(self, value):
"""Setter for completed_at property."""
self.logger.warn("Setting values on completed_at will NOT update the remote Canvas instance.")
self._completed_at = value
@property
def items_count(self):
"""The number of items in the module."""
return self._items_count
@items_count.setter
def items_count(self, value):
"""Setter for items_count property."""
self.logger.warn("Setting values on items_count will NOT update the remote Canvas instance.")
self._items_count = value
@property
def unlock_at(self):
"""(Optional) the date this module will unlock."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def workflow_state(self):
"""the state of the module: 'active', 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def items(self):
"""The contents of this module, as an array of Module Items. (Present only if requested via include[]=items AND the module is not deemed too large by Canvas.)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def prerequisite_module_ids(self):
"""IDs of Modules that must be completed before this one is unlocked."""
return self._prerequisite_module_ids
@prerequisite_module_ids.setter
def prerequisite_module_ids(self, value):
"""Setter for prerequisite_module_ids property."""
self.logger.warn("Setting values on prerequisite_module_ids will NOT update the remote Canvas instance.")
self._prerequisite_module_ids = value
@property
def state(self):
"""The state of this Module for the calling user one of 'locked', 'unlocked', 'started', 'completed' (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._state
@state.setter
def state(self, value):
"""Setter for state property."""
self.logger.warn("Setting values on state will NOT update the remote Canvas instance.")
self._state = value
@property
def publish_final_grade(self):
"""if the student's final grade for the course should be published to the SIS upon completion of this module."""
return self._publish_final_grade
@publish_final_grade.setter
def publish_final_grade(self, value):
"""Setter for publish_final_grade property."""
self.logger.warn("Setting values on publish_final_grade will NOT update the remote Canvas instance.")
self._publish_final_grade = value
@property
def position(self):
"""the position of this module in the course (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def items_url(self):
"""The API URL to retrive this module's items."""
return self._items_url
@items_url.setter
def items_url(self, value):
"""Setter for items_url property."""
self.logger.warn("Setting values on items_url will NOT update the remote Canvas instance.")
self._items_url = value
@property
def id(self):
"""the unique identifier for the module."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def require_sequential_progress(self):
"""Whether module items must be unlocked in order."""
return self._require_sequential_progress
@require_sequential_progress.setter
def require_sequential_progress(self, value):
"""Setter for require_sequential_progress property."""
self.logger.warn("Setting values on require_sequential_progress will NOT update the remote Canvas instance.")
self._require_sequential_progress = value
@property
def name(self):
"""the name of this module."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
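# Illustrative sketch of the Module model above: the model classes in this file
# are plain value holders. They can be built from keyword arguments and expose
# read/write properties, but the setters only log a warning and never push
# changes back to Canvas. The field values below are invented placeholders:
#
#   module = Module(id=5, name="Week 1", items_count=12, workflow_state="active")
#   print(module.name, module.items_count)   # -> Week 1 12
#   module.name = "Week 2"                   # logged: will NOT update the remote Canvas instance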
class Moduleitemsequence(BaseModel):
"""Moduleitemsequence Model."""
def __init__(self, items=None, modules=None):
"""Init method for Moduleitemsequence class."""
self._items = items
self._modules = modules
self.logger = logging.getLogger('py3canvas.Moduleitemsequence')
@property
def items(self):
"""an array containing one hash for each appearence of the asset in the module sequence (up to 10 total)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def modules(self):
"""an array containing each Module referenced above."""
return self._modules
@modules.setter
def modules(self, value):
"""Setter for modules property."""
self.logger.warn("Setting values on modules will NOT update the remote Canvas instance.")
self._modules = value
class Completionrequirement(BaseModel):
"""Completionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Completionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Completionrequirement')
@property
def min_score(self):
"""minimum score required to complete (only present when type == 'min_score')."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""one of 'must_view', 'must_submit', 'must_contribute', 'min_score'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""whether the calling user has met this requirement (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Moduleitem(BaseModel):
"""Moduleitem Model."""
def __init__(self, indent=None, title=None, url=None, completion_requirement=None, html_url=None, content_details=None, new_tab=None, external_url=None, position=None, module_id=None, content_id=None, type=None, id=None, page_url=None):
"""Init method for Moduleitem class."""
self._indent = indent
self._title = title
self._url = url
self._completion_requirement = completion_requirement
self._html_url = html_url
self._content_details = content_details
self._new_tab = new_tab
self._external_url = external_url
self._position = position
self._module_id = module_id
self._content_id = content_id
self._type = type
self._id = id
self._page_url = page_url
self.logger = logging.getLogger('py3canvas.Moduleitem')
@property
def indent(self):
"""0-based indent level; module items may be indented to show a hierarchy."""
return self._indent
@indent.setter
def indent(self, value):
"""Setter for indent property."""
self.logger.warn("Setting values on indent will NOT update the remote Canvas instance.")
self._indent = value
@property
def title(self):
"""the title of this item."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def url(self):
"""(Optional) link to the Canvas API object, if applicable."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def completion_requirement(self):
"""Completion requirement for this module item."""
return self._completion_requirement
@completion_requirement.setter
def completion_requirement(self, value):
"""Setter for completion_requirement property."""
self.logger.warn("Setting values on completion_requirement will NOT update the remote Canvas instance.")
self._completion_requirement = value
@property
def html_url(self):
"""link to the item in Canvas."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def content_details(self):
"""(Present only if requested through include[]=content_details) If applicable, returns additional details specific to the associated object."""
return self._content_details
@content_details.setter
def content_details(self, value):
"""Setter for content_details property."""
self.logger.warn("Setting values on content_details will NOT update the remote Canvas instance.")
self._content_details = value
@property
def new_tab(self):
"""(only for 'ExternalTool' type) whether the external tool opens in a new tab."""
return self._new_tab
@new_tab.setter
def new_tab(self, value):
"""Setter for new_tab property."""
self.logger.warn("Setting values on new_tab will NOT update the remote Canvas instance.")
self._new_tab = value
@property
def external_url(self):
"""(only for 'ExternalUrl' and 'ExternalTool' types) external url that the item points to."""
return self._external_url
@external_url.setter
def external_url(self, value):
"""Setter for external_url property."""
self.logger.warn("Setting values on external_url will NOT update the remote Canvas instance.")
self._external_url = value
@property
def position(self):
"""the position of this item in the module (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def module_id(self):
"""the id of the Module this item appears in."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def content_id(self):
"""the id of the object referred to applies to 'File', 'Discussion', 'Assignment', 'Quiz', 'ExternalTool' types."""
return self._content_id
@content_id.setter
def content_id(self, value):
"""Setter for content_id property."""
self.logger.warn("Setting values on content_id will NOT update the remote Canvas instance.")
self._content_id = value
@property
def type(self):
"""the type of object referred to one of 'File', 'Page', 'Discussion', 'Assignment', 'Quiz', 'SubHeader', 'ExternalUrl', 'ExternalTool'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""the unique identifier for the module item."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def page_url(self):
"""(only for 'Page' type) unique locator for the linked wiki page."""
return self._page_url
@page_url.setter
def page_url(self, value):
"""Setter for page_url property."""
self.logger.warn("Setting values on page_url will NOT update the remote Canvas instance.")
self._page_url = value
class Moduleitemsequencenode(BaseModel):
"""Moduleitemsequencenode Model."""
def __init__(self, current=None, prev=None, next=None):
"""Init method for Moduleitemsequencenode class."""
self._current = current
self._prev = prev
self._next = next
self.logger = logging.getLogger('py3canvas.Moduleitemsequencenode')
@property
def current(self):
"""current."""
return self._current
@current.setter
def current(self, value):
"""Setter for current property."""
self.logger.warn("Setting values on current will NOT update the remote Canvas instance.")
self._current = value
@property
def prev(self):
"""prev."""
return self._prev
@prev.setter
def prev(self, value):
"""Setter for prev property."""
self.logger.warn("Setting values on prev will NOT update the remote Canvas instance.")
self._prev = value
@property
def next(self):
"""next."""
return self._next
@next.setter
def next(self, value):
"""Setter for next property."""
self.logger.warn("Setting values on next will NOT update the remote Canvas instance.")
self._next = value
class Moduleitemcontentdetails(BaseModel):
"""Moduleitemcontentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Moduleitemcontentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Moduleitemcontentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
| mit | 998,213,574,905,809,700 | 37.412935 | 354 | 0.624493 | false |
shubhamVerma/code-eval | Category - Easy/sumdigitsCodeEval.py | 1 | 1271 | '''
sumdigitsCodeEval.py - Solution to Problem Sum Digits (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Given a positive integer, find the sum of its constituent digits.
Input sample:
The first argument will be a text file containing positive integers, one per line.
e.g.
23
496
Output sample:
Print to stdout, the sum of the numbers that make up the integer, one per line.
e.g.
5
19
'''
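# Worked example: for the sample input line "496" the digit sum is
# 4 + 9 + 6 = 19, which is exactly what the expression in the loop below computes:
#
#   >>> sum(map(int, "496"))
#   19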
import sys
if __name__ == '__main__':
f = open(sys.argv[1], 'r')
test_cases = f.read().split('\n')
for test_case in test_cases:
print sum( map(int, test_case) )
f.close() | gpl-3.0 | 5,175,259,314,744,266,000 | 22.555556 | 83 | 0.707317 | false |
kajgan/stbgui | lib/python/Components/Converter/ClientsStreaming.py | 1 | 3432 | from Converter import Converter
from Poll import Poll
from Components.Element import cached
from Components.Sources.StreamService import StreamServiceList
from enigma import eStreamServer
from ServiceReference import ServiceReference
import socket
class ClientsStreaming(Converter, Poll, object):
UNKNOWN = -1
REF = 0
IP = 1
NAME = 2
ENCODER = 3
NUMBER = 4
SHORT_ALL = 5
ALL = 6
INFO = 7
INFO_RESOLVE = 8
INFO_RESOLVE_SHORT = 9
EXTRA_INFO = 10
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.poll_interval = 30000
self.poll_enabled = True
if type == "REF":
self.type = self.REF
elif type == "IP":
self.type = self.IP
elif type == "NAME":
self.type = self.NAME
elif type == "ENCODER":
self.type = self.ENCODER
elif type == "NUMBER":
self.type = self.NUMBER
elif type == "SHORT_ALL":
self.type = self.SHORT_ALL
elif type == "ALL":
self.type = self.ALL
elif type == "INFO":
self.type = self.INFO
elif type == "INFO_RESOLVE":
self.type = self.INFO_RESOLVE
elif type == "INFO_RESOLVE_SHORT":
self.type = self.INFO_RESOLVE_SHORT
elif type == "EXTRA_INFO":
self.type = self.EXTRA_INFO
else:
self.type = self.UNKNOWN
self.streamServer = eStreamServer.getInstance()
@cached
def getText(self):
if self.streamServer is None:
return ""
clients = []
refs = []
ips = []
names = []
encoders = []
extrainfo = _("ClientIP") + "\t" + _("Transcode") + "\t" + _("Channel") + "\n"
info = ""
for x in self.streamServer.getConnectedClients():
refs.append((x[1]))
service_name = ServiceReference(x[1]).getServiceName() or "(unknown service)"
names.append((service_name))
ip = x[0]
ips.append((ip))
if int(x[2]) == 0:
strtype = "S"
encoder = _('NO')
else:
strtype = "T"
encoder = _('YES')
encoders.append((encoder))
if self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
try:
raw = socket.gethostbyaddr(ip)
ip = raw[0]
except:
pass
if self.type == self.INFO_RESOLVE_SHORT:
ip, sep, tail = ip.partition('.')
info += ("%s %-8s %s\n") % (strtype, ip, service_name)
clients.append((ip, service_name, encoder))
extrainfo += ("%-8s\t%s\t%s") % (ip, encoder, service_name) +"\n"
if self.type == self.REF:
return ' '.join(refs)
elif self.type == self.IP:
return ' '.join(ips)
elif self.type == self.NAME:
return ' '.join(names)
elif self.type == self.ENCODER:
return _("Transcoding: ") + ' '.join(encoders)
elif self.type == self.NUMBER:
return str(len(clients))
elif self.type == self.EXTRA_INFO:
return extrainfo
elif self.type == self.SHORT_ALL:
return _("Total clients streaming: %d (%s)") % (len(clients), ' '.join(names))
elif self.type == self.ALL:
return '\n'.join(' '.join(elems) for elems in clients)
elif self.type == self.INFO or self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
return info
else:
return "(unknown)"
return ""
text = property(getText)
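# Illustrative sketch: converters like this one are normally instantiated by the
# Enigma2 skin engine rather than called directly. A skin entry might look like
# the following (the source name here is an assumption; the converter argument is
# what selects the output format):
#
#   <widget source="session.CurrentService" render="Label">
#       <convert type="ClientsStreaming">SHORT_ALL</convert>
#   </widget>
#
# The argument string ("NUMBER", "SHORT_ALL", "EXTRA_INFO", ...) is the `type`
# handled in __init__ above and determines which text getText returns.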
@cached
def getBoolean(self):
if self.streamServer is None:
return False
return (self.streamServer.getConnectedClients() or StreamServiceList) and True or False
boolean = property(getBoolean)
def changed(self, what):
Converter.changed(self, (self.CHANGED_POLL,))
def doSuspend(self, suspended):
pass | gpl-2.0 | 5,481,053,068,278,078,000 | 23.347518 | 104 | 0.638986 | false |
viaict/viaduct | app/forms/pimpy.py | 1 | 1268 | import datetime
from flask_babel import _
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, DateTimeField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, Optional
from app import constants
from app.service import group_service, pimpy_service
class AddTaskForm(FlaskForm):
name = StringField(_('Name'), validators=[InputRequired()])
content = TextAreaField(_('Content'), validators=[Optional()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
users = StringField(_('Users'))
status = SelectField(_('Status'), coerce=int,
choices=pimpy_service.get_task_status_choices())
class AddMinuteForm(FlaskForm):
content = TextAreaField(_('Minute content'), validators=[InputRequired()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
date = DateTimeField(_('Date'), format=constants.DATE_FORMAT,
default=datetime.date.today)
| mit | 2,035,201,200,967,457,500 | 36.294118 | 78 | 0.698738 | false |