code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M |
---|---|---|---|---|---|
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .linting import BackgroundLinter
from .completion import AnacondaCompletionEventListener
from .signatures import AnacondaSignaturesEventListener
from .autopep8 import AnacondaAutoformatPEP8EventListener
__all__ = [
'BackgroundLinter',
'AnacondaCompletionEventListener',
'AnacondaSignaturesEventListener',
'AnacondaAutoformatPEP8EventListener'
]
| danalec/dotfiles | sublime/.config/sublime-text-3/Packages/Anaconda/listeners/__init__.py | Python | mit | 497 |
import unittest
from evoplotter.dims import *
from evoplotter import printer
class TestsPrinter(unittest.TestCase):
x = "x"
r = "r"
c = "c"
data = [{r: 0, c: 0, x: 1},
{r: 0, c: 0, x: 2},
{r: 0, c: 0, x: 3},
{r: 0, c: 1, x: 4},
{r: 0, c: 1, x: 5},
{r: 0, c: 1, x: 6},
{r: 0, c: 2, x: 7},
{r: 0, c: 2, x: 8},
{r: 0, c: 2, x: 9},
{r: 1, c: 0, x: 11},
{r: 1, c: 0, x: 12},
{r: 1, c: 0, x: 13},
{r: 1, c: 1, x: 14},
{r: 1, c: 1, x: 15},
{r: 1, c: 1, x: 16},
{r: 1, c: 2, x: 17},
{r: 1, c: 2, x: 18},
{r: 1, c: 2, x: 19}]
dim_rows = Dim([Config("r0", lambda d: d["r"] == 0),
Config("r1", lambda d: d["r"] == 1)])
dim_cols = Dim([Config("c0", lambda d: d["c"] == 0),
Config("c1", lambda d: d["c"] == 1),
Config("c2", lambda d: d["c"] == 2)])
dim_z = Dim([Config("z0", lambda d: d["x"] < 5 ),
Config("z1", lambda d: d["x"] >= 5)])
def test_text_table_multichar_seps(self):
text = printer.text_table(self.data, self.dim_rows, self.dim_cols, lambda ds: sum([d["x"] for d in ds]), d_cols=" && ", d_rows=";\n")
self.assertEqual(" && c0 && c1 && c2;\n" + "r0 && 6 && 15 && 24;\n" + "r1 && 36 && 45 && 54;\n", text)
def test_text_table(self):
text = printer.text_table(self.data, self.dim_rows, self.dim_cols, lambda ds: sum([d["x"] for d in ds]))
self.assertEqual("\tc0\tc1\tc2\n" + "r0\t6\t15\t24\n" + "r1\t36\t45\t54\n", text)
text = printer.text_table(self.data, self.dim_cols, self.dim_rows, lambda ds: sum([d["x"] for d in ds]))
self.assertEqual("\tr0\tr1\n" + "c0\t6\t36\n" + "c1\t15\t45\n" + "c2\t24\t54\n", text)
def test_latex_table_vb0(self):
text = printer.latex_table(self.data, self.dim_rows, self.dim_cols, lambda ds: sum([d["x"] for d in ds]),
vertical_border=0)
text = self.clear_multicols(text)
self.assertEqual(r"\begin{tabular}{lccc}" + "\n"
r"\hline" + "\n" +
r" & c0 & c1 & c2\\" + "\n" +
r"\hline" + "\n" +
r"r0 & 6 & 15 & 24\\"+"\n" + r"r1 & 36 & 45 & 54\\" + "\n" +
r"\hline" + "\n" +
r"\end{tabular}" + "\n", text)
def test_latex_table_vb1(self):
text = printer.latex_table(self.data, self.dim_rows, self.dim_cols, lambda ds: sum([d["x"] for d in ds]),
vertical_border=1, first_col_align="c")
text = self.clear_multicols(text)
self.assertEqual(r"\begin{tabular}{|c|ccc|}" + "\n"
r"\hline" + "\n" +
r" & c0 & c1 & c2\\" + "\n" +
r"\hline" + "\n" +
r"r0 & 6 & 15 & 24\\"+"\n" + r"r1 & 36 & 45 & 54\\" + "\n" +
r"\hline" + "\n" +
r"\end{tabular}" + "\n", text)
def test_latex_table_vb2(self):
text = printer.latex_table(self.data, self.dim_rows, self.dim_cols, lambda ds: sum([d["x"] for d in ds]),
vertical_border=2, first_col_align="r")
text = self.clear_multicols(text)
self.assertEqual(r"\begin{tabular}{|r|c|c|c|}" + "\n"
r"\hline" + "\n" +
r" & c0 & c1 & c2\\" + "\n" +
r"\hline" + "\n" +
r"r0 & 6 & 15 & 24\\"+"\n" + r"r1 & 36 & 45 & 54\\" + "\n" +
r"\hline" + "\n" +
r"\end{tabular}" + "\n", text)
def clear_multicols(self, s):
r"""Replaces \multicolumn markers, leaving only the cell content."""
return s.replace(r"\multicolumn{1}{c}", "").replace("{c0}", "c0").replace("{c1}", "c1").replace("{c2}", "c2")
def test_latex_table_header_multilayered_1(self):
text = printer.latex_table_header_multilayered(self.dim_cols)
text = self.clear_multicols(text)
self.assertEqual(r" & c0 & c1 & c2\\" + "\n" + r"\hline" + "\n", text)
def test_latex_table_header_multilayered_2(self):
dim = self.dim_rows * self.dim_cols
text = printer.latex_table_header_multilayered(dim)
text = self.clear_multicols(text)
self.assertEqual(r" & \multicolumn{3}{c}{r0} & \multicolumn{3}{c}{r1}\\" + "\n" +
r" & c0 & c1 & c2 & c0 & c1 & c2\\" + "\n" + r"\hline" + "\n", text)
dim = self.dim_rows * self.dim_cols
dim = Dim(dim.configs[:-1])
text = printer.latex_table_header_multilayered(dim)
text = self.clear_multicols(text)
self.assertEqual(r" & \multicolumn{3}{c}{r0} & \multicolumn{2}{c}{r1}\\" + "\n" +
r" & c0 & c1 & c2 & c0 & c1\\" + "\n" + r"\hline" + "\n", text)
def test_latex_table_header_multilayered_3(self):
dim = self.dim_z * self.dim_rows * self.dim_cols
text = printer.latex_table_header_multilayered(dim)
text = self.clear_multicols(text)
self.assertEqual(r" & \multicolumn{6}{c}{z0} & \multicolumn{6}{c}{z1}\\" + "\n" +
r" & \multicolumn{3}{c}{r0} & \multicolumn{3}{c}{r1} & \multicolumn{3}{c}{r0} & \multicolumn{3}{c}{r1}\\" + "\n" +
r" & c0 & c1 & c2 & c0 & c1 & c2 & c0 & c1 & c2 & c0 & c1 & c2\\" + "\n" + r"\hline" + "\n", text)
dim = self.dim_z * self.dim_rows * self.dim_cols
dim = Dim(dim.configs[:-1])
text = printer.latex_table_header_multilayered(dim)
text = self.clear_multicols(text)
self.assertEqual(r" & \multicolumn{6}{c}{z0} & \multicolumn{5}{c}{z1}\\" + "\n" +
r" & \multicolumn{3}{c}{r0} & \multicolumn{3}{c}{r1} & \multicolumn{3}{c}{r0} & \multicolumn{2}{c}{r1}\\" + "\n" +
r" & c0 & c1 & c2 & c0 & c1 & c2 & c0 & c1 & c2 & c0 & c1\\" + "\n" + r"\hline" + "\n", text)
def test_text_listing(self):
dim = self.dim_rows * self.dim_cols
text = printer.text_listing(self.data, dim, lambda ds: sum([d["x"] for d in ds]), d_configs="\n\n")
self.assertEqual("(*) CONFIG: r0/c0\n6\n\n" + "(*) CONFIG: r0/c1\n15\n\n" + "(*) CONFIG: r0/c2\n24\n\n" +
"(*) CONFIG: r1/c0\n36\n\n" + "(*) CONFIG: r1/c1\n45\n\n" + "(*) CONFIG: r1/c2\n54\n\n", text)
def test_decorate(self):
text = r"""0 & 5 & 10\\
10 & 5 & 0\\
"""
res = printer.decorate_table(text, lambda x: "#{0}#".format(x), d_cols=" & ", d_rows="\\\\\n")
self.assertEqual("#0# & #5# & #10#\\\\\n#10# & #5# & #0#\\\\\n", res)
def test_table_color_map(self):
text = r"""0 & 5 & 10\\
20 & -5 & 0\\
"""
MinNumber = 0
MaxNumber = 10
MidNumber = 5 # MaxNumber / 2
MinColor = "green"
MidColor = "yellow"
MaxColor = "red"
text = printer.table_color_map(text, MinNumber, MidNumber, MaxNumber, MinColor, MidColor, MaxColor)
self.assertEqual("\cellcolor{green!100.0!yellow}0 & \cellcolor{red!0.0!yellow}5 & \cellcolor{red!100.0!yellow}10\\\\\n"+
"\cellcolor{red!100.0!yellow}20 & \cellcolor{green!100.0!yellow}-5 & \cellcolor{green!100.0!yellow}0\\\\\n", text)
| iwob/evoplotter | tests/test_printer.py | Python | mit | 7,474 |
import os
import xbmcgui
import xbmc
import time
import urllib
class Downloader:
def __init__(self,):
pass
def download(self,path,url,name):
if os.path.isfile(path) is True:
while os.path.exists(path):
try: os.remove(path); break
except: pass
dp = xbmcgui.DialogProgress()
dp.create('EUROPEIPTV Downloader')
dp.update(0,name)
xbmc.sleep(500)
start_time = time.time()
urllib.URLopener.version = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0'
try:
urllib.urlretrieve(url, path, lambda nb, bs, fs: self.dialogdown(name,nb, bs, fs, dp, start_time))
dp.close()
return True
except:
while os.path.exists(path):
try: os.remove(path); break
except: pass
dp.close()
return False
def dialogdown(self,name,numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0: eta = (filesize - numblocks * blocksize) / kbps_speed
else: eta = 0
kbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '%.02f MB %s %.02f MB' % (currently_downloaded,'downloaded', total)
e = ' (%.0f Kb/s) ' % kbps_speed
tempo = 'Tempo:' + ' %02d:%02d' % divmod(eta, 60)
dp.update(percent,name +' - '+ mbs + e,tempo)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
dp.close()
raise StopDownloading('Stopped Downloading')
class StopDownloading(Exception):
def __init__(self, value): self.value = value
def __str__(self): return repr(self.value)
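# A minimal usage sketch (illustrative only; the path and URL below are placeholders,
# and a running Kodi/XBMC environment is assumed for the progress dialog):
#
#   downloader = Downloader()
#   ok = downloader.download('/tmp/example.zip', 'http://example.com/example.zip', 'example.zip')
#   # `ok` is True when the file was fetched, False if the download failed and was cleaned up.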
| repotvsupertuga/tvsupertuga.repository | script.premium.TVsupertuga/resources/lib/Downloader.py | Python | gpl-2.0 | 2,159 |
"""Callback-based event and input handling.
---NODOC---
TODO:
[ESSENTIAL]
- BUG: button events don't really do the right thing
- should track real held state from held state of all inputs, and only send up/down if it actually changes
- BUG: input normalisation doesn't work (button held state)
- state that inputs/events should only have one parent (maybe protect against it)
- eh.grab (and maybe have grab toggle for getting all input for a while)
- eh.set_{thresholds,bdys} like deadzones (but also allow global, and same with deadzones)
- make them setters
- think of a nicer system for it (some sort of InputFilter, then {filter: value}?)
- or just have .find_inputs then user can do it manually?
- Event.disable()/enable()
- config: can do (per-device, per-device_id/var or global) deadzones/thresholds/bdy (can already do per-input, right?)
- conffile.generate{,_s}, eh.save{,_s}
- config: domain filenames
- try loading from a homedir one first, then fall back to the distributed one
- save to the homedir one
[FUTURE]
- UI buttons (+ other controls?)
- are ButtonInput
- have any number of moveable 'cursors' in eh, with .click(), .move_by(), .move_to
- can attach to relaxis events
- region: InputRect, InputArea subclass
- have a ButtonInput for cursors being in a region (specialised subclasses for mice, etc.)
- UI button is this combined with another ButtonInput (eg. mouse click)
- have input combiners
- how to use in cfg (since they might be dynamic, and use input combiners)?
- eh.postpone(), Event.postpone()
- eh.detect_pads() (make sure to re-initialise already-initialised ones)
- Scheme
- tools for editing/typing text
- input recording and playback (allow white/blacklisting by domain/registered event name)
- eh.*monitor_deadzones
- a way to register new input/event types (consider module data structures)
- document using __str__ backends
- working with config (evts/inputs have .args_from_config(*words))
- joy ball (seems like RelAxisInput, but need a pad with a ball to test)
- or maybe just do it and include a warning
[config]
- support for events as inputs
- input groups for having the same inputs in different events, eg.
[next]
kbd ENTER
kbd KP_RETURN
kbd SPACE
button next DOWN REPEAT .3 .1
[next]
kbd RIGHT
button confirm DOWN
[next]
[MP example]
button pause DOWN
kbd ESCAPE
pad button 1
axis2 moveK1
left kbd LEFT
right kbd RIGHT
up kbd UP
down kbd DOWN
axis2 moveK2
left kbd a
right kbd d
up kbd w
down kbd s
axis2 moveC
left right pad <x> axis 0
up down pad <x> axis 1 .1
axis2 imoveC
left right pad <x> axis 0
down up pad <x> axis 1 .1
button fire1
kbd rctrl
button fire2
kbd space
button fire3
pad button 0
scheme play
# must have the same number of options in each field
move moveK1 moveK2 moveC # if no more args, take everything this prefixes, and sort
fire fire1 fire2 fire3 # or could do this; here, order is fixed
----
eh['pause'].cb(pause)
# call function move() with the player from players above followed by
# (horizontal, vertical) axis positions (added via scheme 'play')
eh['move'].cb(move)
# create n_players control schemes with priorities favouring gamepad over WASD
# over arrow keys
# players is list of ({action: action_id}, {device_var: device})
# priorities are high to low; omitted ones don't get used
players = eh['play'].distribute(n_players, 'C', 'K2', 'K1')
---NODOC---
"""
from .handler import *
from .inputs import *
from .evts import *
from . import conffile
| ikn/pygame-template | game/engine/evt/__init__.py | Python | bsd-3-clause | 3,707 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich <sennrich@cl.uzh.ch>
from __future__ import division
import sys
import os
import random
import math
import operator
from subprocess import Popen, PIPE
from collections import defaultdict
from config import LMPLZ_CMD
def euclidean_distance(v1, v2):
total = 0
for dim in v1:
total += (v1[dim] - v2[dim])**2
total **= 0.5  # square root of the summed squared differences (Euclidean distance)
return total
def dot_product(v1,v2):
dp = 0
for dim in v1:
if dim in v2:
dp += v1[dim]*v2[dim]
return dp
def cosine(v1,v2):
try:
return dot_product(v1,v2) / math.sqrt(dot_product(v1,v1)*dot_product(v2,v2))
except ZeroDivisionError:
return 0
# Distance between vector and centroid: implemented: cosine, euclidean_distance
DISTANCE_FUNCTION = cosine
#needs to correspond to distance function; maximize for cosine, minimize for euclidean_distance
MAXIMIZE = True
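# Worked example for the two distance measures above (illustrative values only):
# with v1 = {0: 1.0, 1: 2.0} and v2 = {0: 2.0, 1: 4.0},
#   dot_product(v1, v2) = 1*2 + 2*4 = 10
#   cosine(v1, v2) = 10 / sqrt(5 * 20) = 1.0 (the vectors are parallel)
# and the Euclidean distance between them is sqrt((1-2)**2 + (2-4)**2) = sqrt(5) ~ 2.24.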
class Cluster(object):
def __init__(self, lms, textfile_s, textfile_t, num_clusters, goldclusters = None, f_distance=DISTANCE_FUNCTION, maximize = MAXIMIZE, general_lm = None, working_dir = ''):
if textfile_s:
self.text_s = open(textfile_s).readlines()
self.n = len(self.text_s)
if textfile_t:
self.text_t = open(textfile_t).readlines()
self.num_clusters = num_clusters
self.goldclusters = goldclusters
self.lms = lms
self.f_distance = f_distance
self.general_lm = general_lm
self.working_dir = working_dir
if maximize:
self.neutral = float('-inf')
self.better_than = operator.gt
else:
self.neutral = float('inf')
self.better_than = operator.lt
def score_lms(self, lms, text):
scores = defaultdict(dict)
# cross-entropy difference according to Moore & Lewis 2010
if self.general_lm:
general_scores = self.general_lm.get_perplexity(text)
for i, lm in enumerate(lms):
lm_scores = lm.get_perplexity(text)
for j, score in enumerate(lm_scores):
scores[j][i] = score
if self.general_lm:
scores[j][i] -= general_scores[j]
return scores
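# Note on the structure returned by score_lms: it is keyed by sentence index, then by
# LM index, i.e. scores[j][i] is the per-word negative log-likelihood of sentence j
# under LM i; when a general LM is configured, the general LM's score is subtracted,
# giving the Moore & Lewis (2010) cross-entropy difference.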
def kmeans(self):
scores = self.score_lms(self.lms, '\n'.join(self.text_s))
centroids = self.random_centroids(scores)
total_distance = self.neutral
i = 0
while total_distance == self.neutral or self.better_than(total_distance,old_total_distance):
old_total_distance = total_distance
clusters, total_distance = self.assign(centroids, scores)
centroids = self.calc_centroids(clusters, scores)
sys.stderr.write('Iteration {0}\n'.format(i))
sys.stderr.write('Avg. distance/similarity to centroids: {0}\n'.format(total_distance/self.n))
if self.goldclusters:
entropy = self.calc_gold_entropy(clusters)
sys.stderr.write('gold entropy: {0}\n'.format(entropy))
i += 1
return clusters, centroids
def assign(self, centroids, scores):
"""expectation step: given centroids, assign each sentence to closest cluster"""
clusters = defaultdict(set)
total_distance = 0
for sentence, vector in scores.items():
best = self.neutral
bestcluster = None
for c, centroid in centroids.items():
d = self.f_distance(centroid, vector)
if self.better_than(d, best):
bestcluster = c
best = d
if not bestcluster is None:
clusters[bestcluster].add(sentence)
total_distance += best
else:
sys.stderr.write('No cluster found (why???)\n')
return clusters, total_distance
def calc_distance(self, clusters, scores):
"""keep clusters as they are, recalculate centroids and distance of each data point to centroid"""
centroids = self.calc_centroids(clusters, scores)
total_distance = 0
for c in clusters:
for sentence in clusters[c]:
vector = scores[sentence]
total_distance += self.f_distance(centroids[c], vector)
return total_distance
def random_centroids(self, scores):
"""random initialisation of centroids"""
sample = random.sample(scores.keys(), self.num_clusters)
centroids = {}
for i in range(self.num_clusters):
centroids[i] = scores[sample[i]]
return centroids
def calc_centroids(self, clusters, scores):
"""maximization step: calculate centroids from cluster members"""
centroids = {}
for c in clusters:
centroid = defaultdict(float)
for sentence in clusters[c]:
for feature, value in scores[sentence].items():
centroid[feature] += value
for feature in centroid:
centroid[feature] /= len(clusters[c])
centroids[c] = centroid
return centroids
def calc_gold_entropy(self, clusters):
"""given a set of true (gold) clusters, calculate entropy (the lower, the more similar the unsupervised clusters are to the gold clusters)"""
entropy = 0
for c in clusters:
entropy_cluster = 0
for gc in self.goldclusters.values():
prob = len(gc.intersection(clusters[c])) / len(clusters[c])
if prob:
entropy_cluster += -prob*math.log(prob,2)
entropy += entropy_cluster * len(clusters[c])/self.n
return entropy
def writedown(self, clusters):
for i in range(self.num_clusters):
out_s = open(os.path.join(self.working_dir,"{0}.s".format(i)),'w')
out_t = open(os.path.join(self.working_dir,"{0}.t".format(i)),'w')
for sentence in clusters[i]:
out_s.write(self.text_s[sentence])
out_t.write(self.text_t[sentence])
out_s.close()
out_t.close()
def write_persistent_data(self, clusters, centroids, f):
"""write some statistics to file for later re-use (LM paths, config options, centroids, which sentence is assigned to which cluster)"""
fobj = open(os.path.join(self.working_dir,f),'w')
fobj.write('LMs:\n')
for lm in self.lms:
fobj.write(lm.name + '\n')
fobj.write('\n')
if self.general_lm:
fobj.write('General_LM:\n' + self.general_lm.name + '\n\n')
fobj.write('Distance:\n' + self.f_distance.__name__ + '\n\n')
if self.better_than == operator.gt:
maximize = '1'
else:
maximize = '0'
fobj.write('Maximize:\n' + maximize + '\n\n')
fobj.write('Centroids:\n')
for c in centroids:
fobj.write(' '.join([str(centroids[c][f]) for f in sorted(centroids[c])]) + '\n')
fobj.write('\n')
fobj.write('Clusters:\n')
for c in clusters:
fobj.write(' '.join([str(sent) for sent in sorted(clusters[c])]) + '\n')
fobj.close()
class LM_interface(object):
"""abstract class; use either SRILM_interface or KENLM_interface"""
def get_perplexity(self, text):
cmd = [self.ppl_cmd] + self.ppl_options + self.options
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=open('/dev/null','w'))
output = p.communicate(text)[0]
scores = []
# read sentence length and log-likelihood from SRILM output
for k,line in enumerate(output.split('\n')):
if k % 4 == 0 and line.startswith('file -:'):
break
elif k % 4 == 1:
length = int(line.split()[2])
elif k % 4 == 2:
j = k // 4
scores.append(-(float(line.split()[3]))/length)
return scores
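# For reference, the parser above assumes SRILM-style `ngram -debug 1 -ppl -` output,
# which emits roughly four lines per sentence, e.g.:
#
#   this is a sentence
#   1 sentences, 4 words, 0 OOVs
#   0 zeroprobs, logprob= -12.34 ppl= 120.1 ppl1= 200.2
#   <blank line>
#
# so line k % 4 == 1 supplies the word count (split()[2]), line k % 4 == 2 supplies the
# log-likelihood (split()[3]), and the final "file -: ..." summary line ends the loop.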
class SRILM_interface(LM_interface):
"""use SRILM for language model training / querying
"""
def __init__(self, lm, order=1, text=None):
self.training_cmd = 'ngram-count'
self.ppl_cmd = 'ngram'
self.training_options = ['-interpolate', '-kndiscount']
self.ppl_options = ['-debug', '1', '-ppl', '-']
self.options = ['-order', str(order), '-unk', '-lm', lm]
self.name = lm
if text and not os.path.exists(lm):
self.train(text)
def train(self, text):
cmd = [self.training_cmd] + self.training_options + self.options + ['-text', text]
sys.stderr.write('Training LM\n')
sys.stderr.write(' '.join(cmd) + '\n')
p = Popen(cmd)
p.wait()
class KENLM_interface(LM_interface):
"""use Ken's tools for language model training / querying.
./ngram is a wrapper around query that emulates SRILM output
"""
from config import LMPLZ_CMD
def __init__(self, lm, order=1, text=None):
self.training_cmd = LMPLZ_CMD
self.ppl_cmd = './ngram'
self.training_options = ['-S', '50%']
self.ppl_options = ['-debug', '1', '-ppl', '-', '-lm', lm]
self.options = ['-o', str(order)]
self.name = lm
if text and not os.path.exists(lm):
self.train(text)
def train(self, text):
cmd = [self.training_cmd] + self.training_options + self.options
sys.stderr.write('Training LM\n')
sys.stderr.write(' '.join(cmd) + '\n')
text = open(text,'r')
lm = open(self.name,'w')
p = Popen(cmd, stdin = text, stdout = lm)
p.wait()
text.close()
lm.close() | rsennrich/multidomain_smt | cluster.py | Python | gpl-2.0 | 9,750 |
""" feather-format compat """
from distutils.version import LooseVersion
from pandas import DataFrame, Int64Index, RangeIndex
from pandas.compat import range
from pandas.io.common import _stringify_path
def _try_import():
# since pandas is a dependency of feather
# we need to import on first use
try:
import feather
except ImportError:
# give a nice error message
raise ImportError("the feather-format library is not installed\n"
"you can install via conda\n"
"conda install feather-format -c conda-forge\n"
"or via pip\n"
"pip install -U feather-format\n")
try:
if LooseVersion(feather.__version__) < LooseVersion('0.3.1'):
    raise AttributeError
except AttributeError:
raise ImportError("the feather-format library must be >= "
"version 0.3.1\n"
"you can install via conda\n"
"conda install feather-format -c conda-forge"
"or via pip\n"
"pip install -U feather-format\n")
return feather
def to_feather(df, path):
"""
Write a DataFrame to the feather-format
Parameters
----------
df : DataFrame
path : string file path, or file-like object
"""
path = _stringify_path(path)
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
feather = _try_import()
valid_types = {'string', 'unicode'}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
if not isinstance(df.index, Int64Index):
raise ValueError("feather does not support serializing {} "
"for the index; you can .reset_index()"
"to make the index into column(s)".format(
type(df.index)))
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError("feather does not support serializing a "
"non-default index for the index; you "
"can .reset_index() to make the index "
"into column(s)")
if df.index.name is not None:
raise ValueError("feather does not serialize index meta-data on a "
"default index")
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
feather.write_dataframe(df, path)
def read_feather(path, nthreads=1):
"""
Load a feather-format object from the file path
.. versionadded:: 0.20.0
Parameters
----------
path : string file path, or file-like object
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
.. versionadded:: 0.21.0
Returns
-------
type of object stored in file
"""
feather = _try_import()
path = _stringify_path(path)
if LooseVersion(feather.__version__) < LooseVersion('0.4.0'):
return feather.read_dataframe(path)
return feather.read_dataframe(path, nthreads=nthreads)
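# A minimal round-trip sketch (not part of pandas itself; assumes the feather-format
# package is installed and that 'example.feather' is writable in the working directory):
#
#   import pandas as pd
#   from pandas.io.feather_format import to_feather, read_feather
#   df = pd.DataFrame({'a': [1, 2, 3]})  # default RangeIndex, string column names
#   to_feather(df, 'example.feather')
#   roundtrip = read_feather('example.feather')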
| amolkahat/pandas | pandas/io/feather_format.py | Python | bsd-3-clause | 3,371 |
# -*- coding:utf-8 -*-
class GameWorld:
def __init__(self):
pass
| dennisding/ether | game/game_world.py | Python | apache-2.0 | 69 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for Spark ML Python APIs.
"""
import sys
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
from pyspark.sql import DataFrame, SQLContext, Row
from pyspark.sql.functions import rand
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasMaxIter, HasInputCol, HasSeed
from pyspark.ml.util import keyword_only
from pyspark.ml import Estimator, Model, Pipeline, Transformer
from pyspark.ml.feature import *
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator, CrossValidatorModel
from pyspark.mllib.linalg import DenseVector
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class ParamTests(PySparkTestCase):
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
class FeatureTests(PySparkTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
self.assertTrue(all([~b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
def test_ngram(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(PySparkTestCase):
def test_fit_minimize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
if __name__ == "__main__":
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| chenc10/Spark-PAF | python/pyspark/ml/tests.py | Python | apache-2.0 | 13,886 |
# -*- coding: utf-8 -*-
"""
Django settings for sous-chef project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import subprocess
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'SOUSCHEF_DJANGO_SECRET_KEY',
'15ine$#^qas4_h2u7yk&lxat*&g*b8+)@wp$2x@vi2#v9)i2#u') # For dev purposes
# SECURITY WARNING: don't run with debug turned on in production!
if os.environ.get('SOUSCHEF_ENVIRONMENT_NAME') == 'PROD':
DEBUG = False
else:
DEBUG = True
# This IP may change for different computers and should be the
# request.META.get('REMOTE_ADDR') for your local computer.
# Don't run with this in production and don't
# commit any changes to this INTERNAL_IPS settings.
# WE NEED THIS IN ORDER TO USE 'debug' IN TEMPLATES!!!
INTERNAL_IPS = os.environ.get(
'SOUSCHEF_DJANGO_INTERNAL_IPS',
# Default value is an example: use space-separated string in env
'172.19.0.1 172.19.0.101').strip().split()
ALLOWED_HOSTS = os.environ.get(
'SOUSCHEF_DJANGO_ALLOWED_HOSTS',
# Default value is an example: use space-separated string in env
'souschef.example.com test.souschef.example.com').strip().split()
# Application definition
INSTALLED_APPS = [
# Django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party apps
'annoying',
'avatar',
'django_filters',
'formtools',
'leaflet',
'rules.apps.AutodiscoverRulesConfig',
# Sous-chef apps
'sous_chef',
'billing',
'datamigration',
'delivery',
'meal',
'member.apps.MemberConfig',
'order',
'notification',
'page',
'note',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'sous_chef.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/sous_chef/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'sous_chef.context_processors.total',
],
},
},
]
WSGI_APPLICATION = 'sous_chef.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'feast',
'USER': 'root',
'PASSWORD': '123456',
'HOST': 'db',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
LOGIN_URL = reverse_lazy('page:login')
LOGIN_REDIRECT_URL = reverse_lazy('page:home')
AUTHENTICATION_BACKENDS = (
'rules.permissions.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'
USE_I18N = True
USE_L10N = True
# Store datetimes as UTC in database
USE_TZ = True
# Use this timezone when displaying datetimes
TIME_ZONE = 'America/Montreal'
# List of supported languages
LANGUAGES = (
('fr', 'Français'),
('en', 'English'),
)
LOCALE_PATHS = (
'meal/locale/',
'member/locale/',
'notification/locale/',
'order/locale/',
'page/locale/',
'delivery/locale/',
'billing/locale',
'note/locale',
)
FORMAT_MODULE_PATH = (
'sous_chef.formats',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
BASE_DIR + '/sous_chef/assets/',
)
STATIC_URL = '/static/'
# Avatar files
if DEBUG:
# When using the development server, serve files directly from /media/
# https://docs.djangoproject.com/en/1.11/howto/static-files/#serving-files-uploaded-by-a-user-during-development
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
else:
# In non-development mode, serve files from /static/ using nginx as a
# dedicated server
# https://docs.djangoproject.com/en/1.11/howto/static-files/deployment/#serving-static-files-from-a-dedicated-server
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/static/media/'
AVATAR_PROVIDERS = (
'avatar.providers.PrimaryAvatarProvider',
'avatar.providers.GravatarAvatarProvider',
'avatar.providers.DefaultAvatarProvider',
)
# Displayable information
SOUSCHEF_VERSION = os.environ.get('SOUSCHEF_VERSION') or ''
SOUSCHEF_ENVIRONMENT_NAME = os.environ.get('SOUSCHEF_ENVIRONMENT_NAME') or ''
try:
GIT_HEAD = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
GIT_TAG = subprocess.check_output(['git', 'describe', '--tags'])
except Exception as e:
GIT_HEAD = None
GIT_TAG = None
| savoirfairelinux/sous-chef | src/sous_chef/settings.py | Python | agpl-3.0 | 6,673 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implementation of generic operators in the presence of Tensor"""
# pylint: disable=invalid-name, too-many-arguments
from __future__ import absolute_import as _abs
import tvm
from . import broadcast as _broadcast
from . import math as _math
def _make_bop(broadcast_bop, orig_bop):
"""Make a specific overloaded binary operator of Tensor when applicable;
apply the original operator if it is not supposed to be overloaded.
Consider the following scenario:
OP : + | - | * | /
R0 : int | float | Expr | TensorSlice | Tensor (rank zero)
R1 : Tensor (positive rank)
In terms of (LHS OP RHS), we apply the following overloading rules:
(1) We use broadcast_OP(LHS, RHS), when both LHS and RHS are R1.
(2) We perform element-wise operation of Tensor and scalar,
when one of LHS and RHS is R1 and another is R0.
(3) We do not overload OP (i.e. stick to orig_bop) otherwise.
Parameters
----------
broadcast_bop : operator function
Operator for broadcast tensor-tensor operation, for rule (1).
orig_bop: operator function
Operator before overloading, for rule (3).
Returns
-------
ret : operator function
The overloaded operator function if applicable or orig_bop otherwise.
"""
name = orig_bop.__name__
def _tensor_bop_impl(lhs, rhs):
"""Overloaded {op} operator.
If both operands are non-zero-rank Tensors, it performs
tensor-tensor {op} operation, and broadcasts inputs when necessary.
If one operand is non-zero-rank Tensor, while the other operand is
scalar like type (e.g., numeric types, Expr, or TensorSlice),
it performs tensor-scalar {op} operation on an element-wise basis.
Otherwise, it performs default generic.{op} operation, as defined
in tvm.generic module.
Parameters
----------
lhs : object
Left operand.
rhs : object
Right operand.
Returns
-------
ret : tvm.Tensor (if at least one operand is non-zero-rank Tensor)
tvm.Expr (otherwise)
The result of {op} operation.
"""
if not isinstance(lhs, tvm.tensor.Tensor) and not isinstance(rhs, tvm.tensor.Tensor):
return orig_bop(lhs, rhs)
return broadcast_bop(lhs, rhs)
_tensor_bop_impl.__doc__ = _tensor_bop_impl.__doc__.format(op=name)
return _tensor_bop_impl
def _bind_generic_ops():
"""Bind generic operators for Tensor."""
# Check __op_priority__ to make sure the binding happens only once.
__op_priority__ = 1
if __op_priority__ > tvm.generic.__op_priority__:
tvm.generic.__op_priority__ = __op_priority__
tvm.generic.add = _make_bop(_broadcast.add, tvm.generic.add)
tvm.generic.subtract = _make_bop(_broadcast.subtract, tvm.generic.subtract)
tvm.generic.multiply = _make_bop(_broadcast.multiply, tvm.generic.multiply)
tvm.generic.divide = _make_bop(_broadcast.divide, tvm.generic.divide)
tvm.generic.cast = _math.cast
_bind_generic_ops()
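# A small usage sketch of the overloading rules documented in _make_bop (illustrative
# only; assumes the classic `tvm.var`/`tvm.placeholder` API this module is written for):
#
#   n = tvm.var('n')
#   A = tvm.placeholder((n,), name='A')
#   B = tvm.placeholder((n,), name='B')
#   C = A + B             # rule (1): both operands are positive-rank Tensors -> broadcast add
#   D = A * 2.0           # rule (2): Tensor and scalar -> element-wise multiply
#   e = tvm.var('x') + 1  # rule (3): no Tensor involved -> falls back to the generic op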
| Huyuwei/tvm | topi/python/topi/generic_op_impl.py | Python | apache-2.0 | 3,896 |
"""
Program to make system boops into music
Copyright (C) 2016 treefroog
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import winsound
scale = 440
ratio = 1.05946
notes = {
'A': 0,
'A#': 1,
'B': 2,
'C': 3,
'C#': 4,
'D': 5,
'D#': 6,
'E': 7,
'F': 8,
'F#': 9,
'G': 10,
'G#': 11,
}
tempered_notes = {}
for note in notes:
freq = scale * ratio ** notes.get(note)
tempered_notes[note] = int(freq)
def play_note(note, duration=500):
"""Plays the notes for a certain duration"""
winsound.Beep(note, duration)
def frequency(note):
"""Finds the frequency of a note"""
pitch_level = note[1]
name = note[0]
step = 12 * pitch_level + notes[name]
return round(110 * 2 ** (step / 12))
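# Worked example for frequency(), which implements equal temperament anchored at
# A = 110 Hz for pitch level 0:
#   frequency(['A', 2]) -> step = 12*2 + 0 = 24 -> round(110 * 2**(24/12)) = 440
#   frequency(['C', 2]) -> step = 12*2 + 3 = 27 -> round(110 * 2**(27/12)) = 523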
def play_note_list(note_list):
"""Plays the list of notes"""
for note in note_list:
freq = frequency(note)
play_note(freq)
song = input("Notes in order:")
note_list = song.split()
note_final_list = []
i = 0
for note in note_list:
if len(note) > 2:
note_final_list.insert(i, [note[:2].upper(), int(note[2:])])
else:
note_final_list.insert(i, [note[:1].upper(), int(note[1:])])
i += 1
play_note_list(note_final_list)
| treefroog/Music.py | python music.py | Python | gpl-3.0 | 1,745 |
import unittest
from webassets import Bundle, Environment
from webassets_react import React
from os import path
class ReactFilterTestCase(unittest.TestCase):
def test_output(self):
environment = Environment(path.dirname(__file__))
bundle = Bundle('input.jsx', output='input.js', filters=('react',))
environment.register('test_bundle', bundle)
bundle.build()
| DotNetAge/webassets-react | tests/test_suite.py | Python | bsd-3-clause | 396 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from PIL import Image
def graf2png(weburl, username, password, timeout, imgname, hwin, wwin, onlypanel):
driver = webdriver.PhantomJS()
driver.set_window_size(hwin, wwin)
driver.get(weburl)
# Enter the username
in_user = driver.find_element_by_name('username')
in_user.clear()
in_user.send_keys(username)
# Enter the password
in_pass = driver.find_element_by_id('inputPassword')
in_pass.clear()
in_pass.send_keys(password)
in_pass.send_keys(Keys.ENTER)
# Wait for the query to load
time.sleep(timeout)
# Timestamp to avoid overwriting captures
currtime = time.strftime("%y%m%d%H%M%S", time.localtime())
imgname = imgname + currtime + '.png'
# Take the screenshot
driver.save_screenshot(imgname)
print("Screen guardada como: " + imgname)
# Crop the panel (optional)
# This only works for panels whose class is 'panel-fullscreen',
# which is the default class a panel gets when you generate a
# share link for it. (Share Panel > Link > Copy)
if (onlypanel):
panel = driver.find_element_by_class_name('panel-fullscreen')
plocation = panel.location
psize = panel.size
left = plocation['x']
top = plocation['y']
right = plocation['x'] + psize['width']
bottom = plocation['y'] + psize['height']
pimg = Image.open(imgname)
pimg = pimg.crop((left, top, right, bottom))
pimgname = 'panel_' + imgname
pimg.save(pimgname)
print("Panel recortado guardado como: " + pimgname)
| andoniaf/DefGrafana.py | graf2png.py | Python | gpl-3.0 | 1,737 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
import filecmp
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator),) + paths)
return _join
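# For example, _gen_fake_join('/')('/tmp/path/', 'to', 'file') returns '/tmp/path/to/file'.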
class FileSystemsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_get_scheme(self):
self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf')) # pylint: disable=anomalous-backslash-in-string
self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')
def test_get_filesystem(self):
self.assertTrue(isinstance(FileSystems.get_filesystem('/tmp'),
localfilesystem.LocalFileSystem))
self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'), # pylint: disable=anomalous-backslash-in-string
localfilesystem.LocalFileSystem))
with self.assertRaises(ValueError):
FileSystems.get_filesystem('error://abc/def')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/path', 'to', 'file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/path', 'to/file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/', 'tmp/path', 'to/file'))
self.assertEqual('/tmp/path/to/file',
FileSystems.join('/tmp/', 'path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path', r'to\file'))
self.assertEqual(r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
# Check IOError if existing directory is created
with self.assertRaises(IOError):
FileSystems.mkdirs(path)
with self.assertRaises(IOError):
FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaisesRegexp(BeamIOError,
r'^Unable to get the Filesystem') as error:
FileSystems.match([None])
self.assertEqual(error.exception.exception_details.keys(), [None])
def test_match_glob(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path1, path2])
def test_match_directory(self):
result = FileSystems.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegexp(BeamIOError,
r'^Copy operation failed') as error:
FileSystems.copy([path1], [path2])
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
FileSystems.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path1], [path2])
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegexp(BeamIOError,
r'^Rename operation failed') as error:
FileSystems.rename([path1], [path2])
self.assertEqual(error.exception.exception_details.keys(), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path_t1], [path_t2])
self.assertTrue(FileSystems.exists(path_t2))
self.assertFalse(FileSystems.exists(path_t1))
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
self.assertFalse(FileSystems.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
FileSystems.delete([path1])
self.assertFalse(FileSystems.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaisesRegexp(BeamIOError,
r'^Delete operation failed') as error:
FileSystems.delete([path1])
self.assertEqual(error.exception.exception_details.keys(), [path1])
| jbonofre/beam | sdks/python/apache_beam/io/filesystems_test.py | Python | apache-2.0 | 8,304 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_rs
short_description: Manage KubeVirt virtual machine replica sets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine replica sets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine replica set.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine replica set exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine."
required: true
type: dict
replicas:
description:
- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
- Replicas defaults to 1 if newly created replica set.
type: int
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine replica set 'myvmir'
kubevirt_rs:
state: present
name: myvmir
namespace: vms
wait: true
replicas: 3
memory: 64M
labels:
myvmi: myvmi
selector:
matchLabels:
myvmi: myvmi
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Remove virtual machine replica set 'myvmir'
kubevirt_rs:
state: absent
name: myvmir
namespace: vms
wait: true
'''
RETURN = '''
kubevirt_rs:
description:
  - The virtual machine replica set managed by the user.
  - "This dictionary contains all values returned by the KubeVirt API; all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
try:
from openshift.dynamic.client import ResourceInstance
except ImportError:
# Handled in module_utils
pass
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
)
KIND = 'VirtualMachineInstanceReplicaSet'
VMIR_ARG_SPEC = {
'replicas': {'type': 'int'},
'selector': {'type': 'dict'},
}
class KubeVirtVMIRS(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
return argument_spec
def _read_stream(self, resource, watcher, stream, name, replicas):
""" Wait for ready_replicas to equal the requested number of replicas. """
if self.params.get('state') == 'absent':
# TODO: Wait for absent
return
return_obj = None
for event in stream:
if event.get('object'):
obj = ResourceInstance(resource, event['object'])
if obj.metadata.name == name and hasattr(obj, 'status'):
if replicas == 0:
if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
return_obj = obj
watcher.stop()
break
if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
return_obj = obj
watcher.stop()
break
if not return_obj:
self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas is None:
self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas != replicas:
            self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
                               "the wait_timeout period.".format(return_obj.status.readyReplicas, replicas))
return return_obj.to_dict()
def wait_for_replicas(self):
namespace = self.params.get('namespace')
wait_timeout = self.params.get('wait_timeout')
replicas = self.params.get('replicas')
name = self.name
resource = self.find_supported_resource(KIND)
w, stream = self._create_stream(resource, namespace, wait_timeout)
return self._read_stream(resource, w, stream, name, replicas)
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
selector = self.params.get('selector')
replicas = self.params.get('replicas')
if selector:
definition['spec']['selector'] = selector
if replicas is not None:
definition['spec']['replicas'] = replicas
        # Execute the CRUD of VM:
template = definition['spec']['template']
dummy, definition = self.construct_vm_definition(KIND, definition, template)
result_crud = self.execute_crud(KIND, definition)
changed = result_crud['changed']
result = result_crud.pop('result')
# Wait for the replicas:
wait = self.params.get('wait')
if wait:
result = self.wait_for_replicas()
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_rs': result,
'result': result_crud,
})
def main():
module = KubeVirtVMIRS()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| dagwieers/ansible | lib/ansible/modules/cloud/kubevirt/kubevirt_rs.py | Python | gpl-3.0 | 6,766 |
import os
import sys
import socket
import threading
def openRegistrationInterface(IP, PORT, converter):
server_socket1 = socket.socket()
server_socket1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket1.bind((IP, PORT))
server_socket1.listen(0)
while True:
client_socket1, addr = server_socket1.accept()
print('Registration is connected to Security Controller')
data = client_socket1.recv(1024)
converter.registerNSF(data)
def request_nsf(IP, PORT, nsf_name):
ADDR = (IP, PORT)
client_socket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client_socket2.connect(ADDR)
client_socket2.send(nsf_name)
except Exception as e:
        print("Failed to connect to %s:%s - %s" % (ADDR[0], ADDR[1], e))
sys.exit()
def receive_nsf_ip(IP, PORT):
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((IP, PORT))
server_socket.listen(5)
while True:
client_socket, addr = server_socket.accept()
data = client_socket.recv(1024)
data = data.split(",")
#print("sudo /home/ubuntu/confd-6.6/bin/netconf-console --host "+data[1]+" /home/ubuntu/LowLevelPolicy/"+data[0]+".xml > factor/ip.txt")
os.system("/home/ubuntu/confd-6.6/bin/netconf-console --host "+data[1]+" /home/ubuntu/LowLevelPolicy/"+data[0]+".xml")
"""
class SocketThread(threading.Thread):
def _bootstrap(self, stop_thread=False):
def stop():
nonlocal stop_thread
stop_thread = True
self.stop = stop
def tracer(*_):
if stop_thread:
raise StopThread()
return tracer
sys.settrace(tracer)
super()._bootstrap()
"""
| kimjinyong/i2nsf-framework | mininet/SecurityController/API/socketAPI.py | Python | apache-2.0 | 1,639 |
# -*- coding: utf-8 -*-
# Copyright (C) 1998-2012 by the Free Software Foundation, Inc.
#
# This file is part of HyperKitty.
#
# HyperKitty is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# HyperKitty is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# HyperKitty. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aamir Khan <syst3m.w0rm@gmail.com>
# Author: Aurelien Bompard <abompard@fedoraproject.org>
#
# pylint: disable=unused-argument
from django.conf import settings
from django.shortcuts import resolve_url
from hyperkitty import VERSION
def export_settings(request):
exports = ["APP_NAME", "USE_MOCKUPS", "USE_INTERNAL_AUTH"]
extra_context = dict(
(name.lower(), getattr(settings, name)) for name in exports)
extra_context["HYPERKITTY_VERSION"] = VERSION
# Login and logout URLs can be view names since Django 1.6
extra_context["login_url"] = resolve_url(settings.LOGIN_URL)
extra_context["logout_url"] = resolve_url(settings.LOGOUT_URL)
return extra_context
from django.core.urlresolvers import reverse, NoReverseMatch
def postorius_info(request):
postorius_url = False
if "postorius" in settings.INSTALLED_APPS:
try:
postorius_url = reverse("postorius.views.list_index")
postorius_url = postorius_url.rstrip("/")
except NoReverseMatch:
pass
    return {"postorius_installed": postorius_url}
| khushboo9293/Hyperkitty | hyperkitty/context_processors.py | Python | gpl-3.0 | 1,859 |
import cv2
import numpy as np
import sys
ED_KERNEL_SIZE = 3
BLUR_KERNEL_SIZE = 9
CONTOUR_AREA_THRESHOLD = 800
colors = [(255,0,0),
(0,255,0),
(255,255,0),
(255,0,255)]
upper = (20,255,255)
lower = (0,135,100)
class BoundingBoxExtremes():
def __init__(self, mx, mix, my, miy):
self.max_x = mx
self.min_x = mix
self.max_y = my
self.min_y = miy
self.x_mid = int((self.max_x + self.min_x) / 2)
self.y_mid = int((self.max_y + self.min_y) / 2)
self.x_center = self.x_mid
self.y_center = self.y_mid
def find_gate(original_image, draw=True):
cv_image = original_image.copy()
cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
ret_fields = list()
image_width = original_image.shape[1]
image_height = original_image.shape[0]
cv_image = cv2.inRange(cv_image, lower, upper)
cv_image = clean_noise(cv_image, mode='erode')
contours = get_contours(cv_image)
blank = np.zeros(cv_image.shape, np.uint8)
blank = cv2.fillPoly(blank, contours, color=(255,255,255))
blank = dilate_contours(blank)
box_points = cv2.findNonZero(blank)
if box_points is not None:
box = bounding_box_nonzeros(box_points)
if draw:
cv2.drawContours(original_image, [box], 0, (255,0,0), 2)
if draw:
iteration = 0
for point in tuple_points(box):
cv2.circle(original_image, point, 3, colors[iteration], -1)
iteration += 1
extremes = get_extremes(tuple_points(box))
if draw:
cv2.circle(original_image, (extremes.x_mid, extremes.y_mid), 3, (255,0,0), -1)
cv2.circle(original_image, (extremes.x_center, extremes.y_center), 3, (0,0,255), -1)
ret_fields = [1,1,0, extremes.min_x, extremes.min_y, extremes.max_x, extremes.max_y, extremes.x_mid, extremes.y_mid, extremes.x_center, extremes.y_center, int(image_width / 2), int(image_height / 2)]
return ret_fields, original_image
def find_pole(original_image, draw=True):
cv_image = original_image.copy()
cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
ret_fields = list()
image_width = original_image.shape[1]
image_height = original_image.shape[0]
cv_image = cv2.inRange(cv_image, lower, upper)
cv_image = clean_noise(cv_image, mode='erode')
contours = get_contours(cv_image, clean=False)
blank = np.zeros(cv_image.shape, np.uint8)
blank = cv2.fillPoly(blank, contours, color=(255,255,255))
blank = dilate_contours(blank)
box_points = cv2.findNonZero(blank)
if box_points is not None:
box = bounding_box_nonzeros(box_points)
if draw:
cv2.drawContours(original_image, [box], 0, (255,0,0), 2)
if draw:
iteration = 0
for point in tuple_points(box):
cv2.circle(original_image, point, 3, colors[iteration], -1)
iteration += 1
extremes = get_extremes(tuple_points(box))
if draw:
cv2.circle(original_image, (extremes.x_mid, extremes.y_mid), 3, (255,0,0), -1)
cv2.circle(original_image, (extremes.x_center, extremes.y_center), 3, (0,0,255), -1)
ret_fields = [0,0, extremes.min_x, extremes.min_y, extremes.max_x, extremes.max_y, extremes.x_mid, extremes.y_mid, int(image_width / 2), int(image_height / 2)]
return ret_fields, original_image
def get_extremes(tuple_points):
mx, mix, my, miy = -1, sys.maxsize, -1, sys.maxsize
for point in tuple_points:
mx = point[0] if point[0] > mx else mx
mix = point[0] if point[0] < mix else mix
my = point[1] if point[1] > my else my
miy = point[1] if point[1] < miy else miy
return BoundingBoxExtremes(mx, mix, my, miy)
def bounding_box_nonzeros(nz_points):
rect = cv2.minAreaRect(nz_points)
box = cv2.boxPoints(rect)
return np.int0(box)
def tuple_points(box):
points = list()
for point in range(len(box)):
        points.append(tuple(box[point]))
return points
def dilate_contours(mask):
kernel = np.ones((ED_KERNEL_SIZE, ED_KERNEL_SIZE), np.uint8)
mask = cv2.dilate(mask, kernel, iterations=3)
return mask
def clean_noise(cv_image_mask, mode='erode'):
kernel = np.ones((ED_KERNEL_SIZE, ED_KERNEL_SIZE), np.uint8)
    if mode == 'blur' or mode == 'both':
        cv_image_mask = cv2.blur(cv_image_mask, (BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE))
    if mode == 'erode' or mode == 'both':
cv_image_mask = cv2.erode(cv_image_mask, kernel, iterations=1)
cv_image_mask = cv2.dilate(cv_image_mask, kernel, iterations=3)
return cv_image_mask
def clean_contours(contours):
c_contours = list()
for contour in contours:
passing = True
if cv2.contourArea(contour) > CONTOUR_AREA_THRESHOLD:
passing = True
else:
passing = False
if passing and cv2.contourArea(contour) / cv2.arcLength(contour, True) < 10:
passing = True
c_contours.append(contour)
else:
passing = False
return c_contours
def get_contours(cv_image_mask, clean=True):
    im2, contours, hierarchy = cv2.findContours(cv_image_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = clean_contours(contours) if clean else contours
return contours
def get_points(contours):
points = list()
if type(contours[0]) is int:
return contours
else:
points.append(get_points(contours[0]))
return points | osu-uwrt/riptide-ros | riptide_vision/scripts/riptide_vision/lib.py | Python | bsd-2-clause | 4,983 |
#!/usr/bin/env python
import os, sys, timing
db = os.environ.get('SMC_DB', 'migrate')
def process(x):
base, ext = os.path.splitext(x)
name = os.path.split(base)[1]
if name.endswith('-time'):
name = name[:-5]
if name.startswith('update-'):
name = name[len('update-'):]
timing.start(name, 'read_from_csv')
s = """time echo "drop table %s_json; create table %s_json (a JSONB); copy %s_json from '%s' with (format csv, DELIMITER e'\\1', QUOTE e'\\2');" | psql %s """%(name, name, name, os.path.abspath(x), db)
print(s)
if os.system(s):
raise RuntimeError("error exporting from rethinkdb - %s"%x)
timing.done(name, 'read_from_csv')
if __name__ == "__main__":
for file in sys.argv[1:]:
process(file) | DrXyzzy/smc | src/scripts/postgresql/migrate/read_from_csv.py | Python | agpl-3.0 | 770 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0012_message_labels'),
]
operations = [
migrations.AlterField(
model_name='label',
name='org',
field=models.ForeignKey(related_name='labels', verbose_name='Organization', to='orgs.Org'),
),
]
| xkmato/casepro | casepro/msgs/migrations/0013_auto_20160223_0917.py | Python | bsd-3-clause | 444 |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 10:14:07 2018
@author: Dell
"""
import uuid
from sqlalchemy.sql import and_
from .orm import Config,Point,Frame,Area,PointLoad,PointRestraint
import logger
def add_point(self,x,y,z):
"""
Add point object to model, if the name already exists, an exception will be raised.
if a point in same location exists, the name of the point will be returned.
param:
x,y,z: float-like, coordinates in SI.
[name]: str, name, optional.
return:
str, the new point's name.
"""
try:
pt=Point()
pt.x=x
pt.y=y
pt.z=z
pt.uuid=str(uuid.uuid1())
pt.name=pt.uuid
self.session.add(pt)
return pt.name
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
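# Hedged usage sketch (illustrative, not from the original project docs): these
# module-level functions are written as unbound methods, so they expect an object
# exposing `self.session` and `self.scale()`; the `model` name below is an assumption.
#   pt_name = add_point(model, 0.0, 0.0, 3.0)          # coordinates in SI units
#   set_point_restraint(model, pt_name, [True] * 6)    # fully fix the new point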
def set_point_restraint_batch(self,points,restraints):
"""
params:
point: list of str, name of point
restraints: bool, list of 6 to set restraints
return:
status of success
"""
try:
assert len(restraints)==6
reses=[]
for point in points:
res=PointRestraint()
res.point_name=point
res.u1=restraints[0]
res.u2=restraints[1]
res.u3=restraints[2]
res.r1=restraints[3]
res.r2=restraints[4]
res.r3=restraints[5]
reses.append(res)
self.session.add_all(reses)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_point_restraint(self,point,restraints):
"""
params:
point: str, name of point
restraints: bool, list of 6 to set restraints
return:
status of success
"""
try:
assert len(restraints)==6
pt=self.session.query(Point).filter_by(name=point).first()
if pt is None:
raise Exception("Point doesn't exists.")
res=self.session.query(PointRestraint).filter_by(point_name=point).first()
if res is None:
res=PointRestraint()
res.point_name=point
res.u1=restraints[0]
res.u2=restraints[1]
res.u3=restraints[2]
res.r1=restraints[3]
res.r2=restraints[4]
res.r3=restraints[5]
self.session.add(res)
elif not (restraints[0] or restraints[1] or restraints[2] or\
restraints[3] or restraints[4] or restraints[5]):
self.session.delete(res)
else:
res.u1=restraints[0]
res.u2=restraints[1]
res.u3=restraints[2]
res.r1=restraints[3]
res.r2=restraints[4]
res.r3=restraints[5]
self.session.add(res)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_point_load(self,point,loadcase,load):
"""
params:
point: str, name of point.
loadcase: str, name of loadcase.
load: float, list of 6 to set restraints.
return:
status of success.
"""
try:
assert len(load)==6
pt=self.session.query(Point).filter_by(name=point).first()
if pt is None:
raise Exception("Point doesn't exists.")
ld=self.session.query(PointLoad).filter_by(point_name=point,loadcase_name=loadcase).first()
if ld is None:
ld=PointLoad()
scale=self.scale()
ld.point_name=point
ld.loadcase_name=loadcase
ld.p1=load[0]*scale['F']
ld.p2=load[1]*scale['F']
ld.p3=load[2]*scale['F']
ld.m1=load[3]*scale['F']*scale['L']
ld.m2=load[4]*scale['F']*scale['L']
ld.m3=load[5]*scale['F']*scale['L']
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
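# Hedged example of applying a nodal load (the 'DL' load case name and the values
# are illustrative; loads are interpreted in the model's current unit system):
#   set_point_load(model, pt_name, 'DL', [0, 0, -10, 0, 0, 0])   # downward force, no moments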
def set_point_coordinate(self,name,x,y,z):
"""
Set point coordinate.
if a point in same location exists, the name of the point will be returned.
param:
x,y,z: float-like, coordinates in current unit.
[name]: str, name, optional.
return:
str, the new point's name.
"""
try:
pt=self.session.query(Point).filter_by(name=name).first()
if pt is None:
raise Exception("Point doesn't exists.")
scale=self.scale()
pt.x=x*scale['L']
pt.y=y*scale['L']
pt.z=z*scale['L']
self.session.add(pt)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_point_mass(self,name,u1,u2,u3,r1,r2,r3):
try:
pt=self.session.query(Point).filter_by(name=name).first()
if pt is None:
raise Exception("Point doesn't exists.")
scale=self.scale()
pass
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_mass_sources(self,source):
try:
pass
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def get_point_names(self):
"""
Get all the name of points in the database
returns:
point list satisfies the coordiniates if successful or None if failed.
"""
try:
pts=self.session.query(Point)
names=[pt.name for pt in pts.all()]
return names
except Exception as e:
logger.info(str(e))
return None
def get_point_name_by_coor(self,x=None,y=None,z=None):
"""
Get the name of points in the database
params:
name: str
x,y,z: coordinates in current_unit
returns:
point list satisfies the coordiniates if successful or None if failed.
"""
try:
tol=self.session.query(Config).first().tolerance
pts=self.session.query(Point)
scale=self.scale()
        if x is not None:
            pts=pts.filter(and_((Point.x-x*scale['L'])<tol,(x*scale['L']-Point.x)<tol))
        if y is not None:
            pts=pts.filter(and_((Point.y-y*scale['L'])<tol,(y*scale['L']-Point.y)<tol))
        if z is not None:
            pts=pts.filter(and_((Point.z-z*scale['L'])<tol,(z*scale['L']-Point.z)<tol))
names=[pt.name for pt in pts.all()]
return names
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_point_coordinate(self,name):
"""
Get point coordinate.
param:
name: str, name, optional.
return:
status of success, and tuple of point's coordinate if or None if failed.
"""
try:
pt=self.session.query(Point).filter_by(name=name).first()
if pt is None:
raise Exception("Point doesn't exists.")
scale=self.scale()
x=pt.x/scale['L']
y=pt.y/scale['L']
z=pt.z/scale['L']
return x,y,z
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def merge_points(self,tol=1e-3):
"""
merge points within certain tolerance.
params:
tol: float, tolerance in in current unit.
return:
status of success.
"""
try:
pts=self.session.query(Point).order_by(Point.x,Point.y,Point.z).all()
pt_map=dict([(pt.name,pt.name) for pt in pts])
pts_to_rmv=[]
scale=self.scale()
for pti,ptj in zip(pts[:-1],pts[1:]):
if (ptj.x-pti.x)**2+(ptj.y-pti.y)**2+(ptj.z-pti.z)**2<(tol*scale['L'])**2:
# pti.point_restraint.point_name=ptj.name
# pti.point_load.point_name=ptj.name
# pti.point_disp.point_name=ptj.name
# pti.point_mass.point_name=ptj.name
# pti.point_restraint+=ptj.point_restraint
# pti.point_load+=ptj.point_load
# pti.point_disp+=ptj.point_disp
# pti.point_mass+=ptj.point_mass
pt_map[ptj.name]=pt_map[pti.name]
pts_to_rmv.append(ptj)
frames=self.session.query(Frame).all()
areas=self.session.query(Area).all()
logger.info(len(pts_to_rmv))
        names_to_rmv=set(pt.name for pt in pts_to_rmv)
        for frm in frames:
            if (frm.pt0_name in names_to_rmv) or (frm.pt1_name in names_to_rmv):
if pt_map[frm.pt0_name]<pt_map[frm.pt1_name]:
frm.pt0_name=pt_map[frm.pt0_name]
frm.pt1_name=pt_map[frm.pt1_name]
frm.order='01'
self.session.add(frm)
elif pt_map[frm.pt0_name]>pt_map[frm.pt1_name]:
frm.pt0_name=pt_map[frm.pt1_name]
frm.pt1_name=pt_map[frm.pt0_name]
frm.order='10'
self.session.add(frm)
else:
self.session.delete(frm)
for area in areas:
area.pt0_name=pt_map[area.pt0_name]
area.pt1_name=pt_map[area.pt1_name]
area.pt2_name=pt_map[area.pt2_name]
area.pt3_name=pt_map[area.pt3_name]
self.session.add(area)
for pt in pts_to_rmv:
self.session.delete(pt)
self.session.flush()
pts=self.session.query(Point).all()
logger.info('merge elements %d'%len(pts))
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
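# Hedged sketch of a typical cleanup step after importing geometry with duplicated
# nodes: one call collapses coincident points and re-wires frames and areas.
# The tolerance is given in the current unit system.
#   merge_points(model, tol=1e-3)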
def set_point_name(self,name,new_name):
    try:
        pt=self.session.query(Point).filter_by(name=name).first()
        if pt is None:
            raise Exception("Point doesn't exist!")
        pt.name=new_name
        self.session.add(pt)
        return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def delete_point(self,name):
    try:
        pt=self.session.query(Point).filter_by(name=name).first()
        if pt is None:
            raise Exception("Point doesn't exist!")
        self.session.delete(pt)
        return True
    except Exception as e:
        logger.info(str(e))
        self.session.rollback()
return False | zhuoju36/StructEngPy | object_model/point.py | Python | mit | 10,395 |
"""locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
# Use include() to add URLS from the itclassified application
urlpatterns += [
url(r'^itclassified/', include('itclassified.urls')),
]
#Add URL maps to redirect the base URL to our application
urlpatterns += [
url(r'^$', RedirectView.as_view(url='/itclassified/', permanent=True)),
]
# Use static() to add url mapping to serve static files during development (only)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#Add Django site authentication urls (for login, logout, password management)
urlpatterns += [
url(r'^accounts/', include('django.contrib.auth.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | netdaniels/it_classified | classified/urls.py | Python | mit | 1,616 |
"""
MPEG audio file parser.
Creation: 12 December 2005
Author: Victor Stinner
"""
from hachoir_py2.parser import Parser
from hachoir_py2.field import (FieldSet,
MissingField, ParserError, createOrphanField,
Bit, Bits, Enum,
PaddingBits, PaddingBytes,
RawBytes)
from hachoir_py2.parser.audio.id3 import ID3v1, ID3v2
from hachoir_py2.core.endian import BIG_ENDIAN
from hachoir_py2.core.tools import humanFrequency, humanBitSize
from hachoir_py2.core.bits import long2raw
from hachoir_py2.stream import InputStreamError
# Max MP3 filesize: 200 MB
MAX_FILESIZE = 200 * 1024 * 1024 * 8
class Frame(FieldSet):
VERSION_NAME = {0: "2.5", 2: "2", 3: "1"}
MPEG_I = 3
MPEG_II = 2
MPEG_II_5 = 0
LAYER_NAME = {1: "III", 2: "II", 3: "I"}
LAYER_I = 3
LAYER_II = 2
LAYER_III = 1
# Bit rates (bit_rate * 1000 = bits/sec)
# key 15 is always invalid
BIT_RATES = {
1: ( # MPEG1
(0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448), # layer I
(0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384), # layer II
(0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
),
2: ( # MPEG2 / MPEG2.5
(0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256), # layer I
(0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160), # layer II
(0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
)
}
SAMPLING_RATES = {
3: {0: 44100, 1: 48000, 2: 32000}, # MPEG1
2: {0: 22050, 1: 24000, 2: 16000}, # MPEG2
0: {0: 11025, 1: 12000, 2: 8000} # MPEG2.5
}
EMPHASIS_NAME = {0: "none", 1: "50/15 ms", 3: "CCIT J.17"}
CHANNEL_MODE_NAME = {
0: "Stereo",
1: "Joint stereo",
2: "Dual channel",
3: "Single channel"
}
# Channel mode => number of channels
NB_CHANNEL = {
0: 2,
1: 2,
2: 2,
3: 1,
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
frame_size = self.getFrameSize()
if not frame_size:
raise ParserError("MPEG audio: Invalid frame %s" % self.path)
self._size = min(frame_size * 8, self.parent.size - self.address)
def createFields(self):
# Header
yield PaddingBits(self, "sync", 11, "Synchronize bits (set to 1)", pattern=1)
yield Enum(Bits(self, "version", 2, "MPEG audio version"), self.VERSION_NAME)
yield Enum(Bits(self, "layer", 2, "MPEG audio layer"), self.LAYER_NAME)
yield Bit(self, "crc16", "No CRC16 protection?")
# Rates and padding
yield Bits(self, "bit_rate", 4, "Bit rate")
yield Bits(self, "sampling_rate", 2, "Sampling rate")
yield Bit(self, "use_padding", "Stream field use padding?")
yield Bit(self, "extension", "Extension")
# Channel mode, mode extension, copyright, ...
yield Enum(Bits(self, "channel_mode", 2, "Channel mode"), self.CHANNEL_MODE_NAME)
yield Bits(self, "mode_ext", 2, "Mode extension")
yield Bit(self, "copyright", "Is copyrighted?")
yield Bit(self, "original", "Is original?")
yield Enum(Bits(self, "emphasis", 2, "Emphasis"), self.EMPHASIS_NAME)
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
return (self["layer"].value != 0
and self["sync"].value == 2047
and self["version"].value != 1
and self["sampling_rate"].value != 3
and self["bit_rate"].value not in (0, 15)
and self["emphasis"].value != 2)
def getSampleRate(self):
"""
Read sampling rate. Returns None on error.
"""
version = self["version"].value
rate = self["sampling_rate"].value
try:
return self.SAMPLING_RATES[version][rate]
except (KeyError, IndexError):
return None
def getBitRate(self):
"""
Read bit rate in bit/sec. Returns None on error.
"""
layer = 3 - self["layer"].value
bit_rate = self["bit_rate"].value
if bit_rate in (0, 15):
return None
if self["version"].value == 3:
dataset = self.BIT_RATES[1] # MPEG1
else:
dataset = self.BIT_RATES[2] # MPEG2 / MPEG2.5
try:
return dataset[layer][bit_rate] * 1000
except (KeyError, IndexError):
return None
def getFrameSize(self):
"""
Read frame size in bytes. Returns None on error.
"""
frame_size = self.getBitRate()
if not frame_size:
return None
sample_rate = self.getSampleRate()
if not sample_rate:
return None
padding = int(self["use_padding"].value)
if self["layer"].value == self.LAYER_III:
if self["version"].value == self.MPEG_I:
return (frame_size * 144) // sample_rate + padding
else:
return (frame_size * 72) // sample_rate + padding
elif self["layer"].value == self.LAYER_II:
return (frame_size * 144) // sample_rate + padding
else: # self.LAYER_I:
frame_size = (frame_size * 12) // sample_rate
return (frame_size + padding) * 4
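    # Worked example (informative note, not from the original source): for an
    # MPEG-1 Layer III frame at 128 kbit/s and 44100 Hz with the padding bit clear,
    # getFrameSize() returns (128000 * 144) // 44100 = 417 bytes (418 with padding).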
def getNbChannel(self):
return self.NB_CHANNEL[self["channel_mode"].value]
def createDescription(self):
info = ["layer %s" % self["layer"].display]
bit_rate = self.getBitRate()
if bit_rate:
info.append("%s/sec" % humanBitSize(bit_rate))
sampling_rate = self.getSampleRate()
if sampling_rate:
info.append(humanFrequency(sampling_rate))
return "MPEG-%s %s" % (self["version"].display, ", ".join(info))
def findSynchronizeBits(parser, start, max_size):
"""
Find synchronisation bits (11 bits set to 1)
Returns None on error, or number of bytes before the synchronization.
"""
address0 = parser.absolute_address
end = start + max_size
size = 0
while start < end:
# Fast search: search 0xFF (first byte of sync frame field)
length = parser.stream.searchBytesLength("\xff", False, start, end)
if length is None:
return None
size += length
start += length * 8
# Strong validation of frame: create the frame
# and call method isValid()
try:
frame = createOrphanField(parser, start - address0, Frame, "frame")
valid = frame.isValid()
except Exception:
valid = False
if valid:
return size
# Invalid frame: continue
start += 8
size += 1
return None
class Frames(FieldSet):
# Padding bytes allowed before a frame
MAX_PADDING = 256
def synchronize(self):
addr = self.absolute_address
start = addr + self.current_size
end = min(start + self.MAX_PADDING * 8, addr + self.size)
padding = findSynchronizeBits(self, start, end)
if padding is None:
raise ParserError("MPEG audio: Unable to find synchronization bits")
if padding:
return PaddingBytes(self, "padding[]", padding, "Padding before synchronization")
else:
return None
def looksConstantBitRate(self, count=10):
"""
Guess if frames are constant bit rate. If it returns False, you can
be sure that frames are variable bit rate. Otherwise, it looks like
constant bit rate (on first count fields).
"""
check_keys = ("version", "layer", "bit_rate")
last_field = None
for index, field in enumerate(self.array("frame")):
if last_field:
for key in check_keys:
if field[key].value != last_field[key].value:
return False
last_field = field
if index == count:
break
return True
def createFields(self):
# Find synchronisation bytes
padding = self.synchronize()
if padding:
yield padding
while self.current_size < self.size:
yield Frame(self, "frame[]")
# padding = self.synchronize()
# if padding:
# yield padding
# Read raw bytes at the end (if any)
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
if self.looksConstantBitRate():
text = "(looks like) Constant bit rate (CBR)"
else:
text = "Variable bit rate (VBR)"
return "Frames: %s" % text
def createMpegAudioMagic():
# ID3v1 magic
magics = [("TAG", 0)]
# ID3v2 magics
for ver_major in ID3v2.VALID_MAJOR_VERSIONS:
magic = "ID3%c\x00" % ver_major
magics.append((magic, 0))
# MPEG frame magic
# TODO: Use longer magic: 32 bits instead of 16 bits
SYNC_BITS = 2047
for version in Frame.VERSION_NAME.iterkeys():
for layer in Frame.LAYER_NAME.iterkeys():
for crc16 in (0, 1):
magic = (SYNC_BITS << 5) | (version << 3) | (layer << 1) | crc16
magic = long2raw(magic, BIG_ENDIAN, 2)
magics.append((magic, 0))
return magics
class MpegAudioFile(Parser):
PARSER_TAGS = {
"id": "mpeg_audio",
"category": "audio",
"file_ext": ("mpa", "mp1", "mp2", "mp3"),
"mime": (u"audio/mpeg",),
"min_size": 4 * 8,
# "magic": createMpegAudioMagic(),
"description": "MPEG audio version 1, 2, 2.5",
"subfile": "skip",
}
endian = BIG_ENDIAN
def validate(self):
if self[0].name in ("id3v2", "id3v1"):
return True
if not self.stream.checked: # TODO: is it possible to handle piped input?
return False
# Validate first 5 frames
for index in xrange(5):
try:
frame = self["frames/frame[%u]" % index]
except MissingField:
# Require a least one valid frame
if (1 <= index) \
and self["frames"].done:
return True
return "Unable to get frame #%u" % index
except (InputStreamError, ParserError):
return "Unable to create frame #%u" % index
# Check first frame values
if not frame.isValid():
return "Frame #%u is invalid" % index
# Check that all frames are similar
if not index:
frame0 = frame
else:
if frame0["channel_mode"].value != frame["channel_mode"].value:
return "Frame #%u channel mode is different" % index
return True
def createFields(self):
# Read ID3v2 (if any)
if self.stream.readBytes(0, 3) == "ID3":
yield ID3v2(self, "id3v2")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
# Check if file is ending with ID3v1 or not and compute frames size
frames_size = self.size - self.current_size
addr = self.size - 128 * 8
if 0 <= addr:
has_id3 = (self.stream.readBytes(addr, 3) == "TAG")
if has_id3:
frames_size -= 128 * 8
else:
has_id3 = False
# Read frames (if any)
if frames_size:
yield Frames(self, "frames", size=frames_size)
# Read ID3v1 (if any)
if has_id3:
yield ID3v1(self, "id3v1")
def createDescription(self):
if "frames" in self:
frame = self["frames/frame[0]"]
return "%s, %s" % (frame.description, frame["channel_mode"].display)
elif "id3v2" in self:
return self["id3v2"].description
elif "id3v1" in self:
return self["id3v1"].description
else:
return "MPEG audio"
def createContentSize(self):
# Get "frames" field
field = self[0]
if field.name != "frames":
try:
field = self[1]
except MissingField:
# File only contains ID3v1 or ID3v2
return field.size
# Error: second field are not the frames"?
if field.name != "frames":
return None
# Go to last frame
frames = field
frame = frames["frame[0]"]
address0 = field.absolute_address
size = address0 + frame.size
while True:
try:
# Parse one MPEG audio frame
frame = createOrphanField(frames, size - address0, Frame, "frame")
# Check frame 32 bits header
if not frame.isValid():
break
except Exception:
break
if MAX_FILESIZE < (size + frame.size):
break
size += frame.size
# ID3v1 at the end?
try:
if self.stream.readBytes(size, 3) == "TAG":
size += ID3v1.static_size
except InputStreamError:
pass
return size
| SickGear/SickGear | lib/hachoir_py2/parser/audio/mpeg_audio.py | Python | gpl-3.0 | 13,857 |
__license__ = 'GPL v3'
__copyright__ = '2014, Emily Palmieri <silentfuzzle@gmail.com>'
from calibre.gui2.viewer.behavior.base_behavior import BaseBehavior
# This class defines an Adventurous Reader behavior where users can only view one section of the ebook at a time.
class BaseAdventurousBehavior (BaseBehavior):
# Constructor
# setup_vscrollbar_method (method) - the method setting up the scrollbar and the position displayed in the upper left
def __init__(self, setup_vscrollbar_method):
BaseBehavior.__init__(self)
self.setup_vscrollbar_method = setup_vscrollbar_method
# Sets the current section of the book the user is viewing
# and the number of pages in that section
# curr_index (integer) - the index of the current section in the spine
# curr_sec (SpineItem) - the current section being displayed
def set_curr_sec(self, curr_index, curr_sec):
if (self.curr_sec != curr_sec):
super(BaseAdventurousBehavior, self).set_curr_sec(curr_index, curr_sec)
# Only setup the scrollbar and position label if this object is an instance of this class
if (type(self) is BaseAdventurousBehavior):
self.num_pages = curr_sec.pages
self.setup_vscrollbar_method()
# Returns whether the user can move from the current section to the passed section
# next_sec (string) - the section to check
def allow_page_turn(self, next_sec):
if (self.curr_sec is None):
# The book doesn't have a bookmark with the user's last position
# Allow returning to the beginning of the book
return True
return False
    # Returns the user's position relative to the section and updates the absolute position
# Sets the new absolute position in the book
# frac (number) - the scrollbar's position in relation to the current displayed section of the book
def calculate_page_label(self, frac):
section_position = frac*float(self.get_section_pages(self.curr_sec))
self.absolute_position = self.curr_sec.start_page + section_position
return section_position + 1
# Moves the user to the passed page number using the passed method
# new_page (number) - the page to move the user to as set by the scrollbar or position label
# goto_page_method (method) - the method to use to move with
def goto_page(self, new_page, goto_page_method):
abs_pos = self.update_page_label(new_page)
goto_page_method(abs_pos, check_allow_page_turn=True)
# Returns the user's absolute position in the ebook given a position set by the scrollbar or position label
# new_page (number) - the page to move the user to as set by the scrollbar or position label
def update_page_label(self, new_page):
return new_page + self.curr_sec.start_page - 1
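    # Illustrative numbers (assumed, not from the original source): for a section
    # whose start_page is 10 and which spans 4 pages, a scrollbar fraction of 0.5
    # gives section_position = 2.0, absolute_position = 12.0, and a page label of 3.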
| silentfuzzle/calibre | src/calibre/gui2/viewer/behavior/adventurous_base_behavior.py | Python | gpl-3.0 | 2,934 |
# coding: utf8
import os
import re
from inspect import isfunction
import pygame as pg
from pygame.locals import *
from ColorHelper import ColorHelper
from Singleton import Singleton
from tools import py_encode_font_txt, py_encode_title
if not pg.font: print 'Warning, fonts disabled'
if not pg.mixer: print 'Warning, sound disabled'
# TODO: Produce nicer errors, using Dialog to display them
# TODO: Split the sound handling out of WindowHelper
# TODO: Write a resource manager for more advanced resource handling
# TODO: Pygame's own errors are better; don't wrap in try just to raise immediately -> either resolve the error, or print it and re-raise
@Singleton
class WindowHelper:
@staticmethod
def Instance():
"""
This function is a workaround to benefit autocompletion
:rtype: WindowHelper
"""
return WindowHelper.Instance()
def __init__(self):
        self.elements = {} # elements (of any kind!)
        self.images = dict() # TODO: dict of images so they can be preloaded
        self.colors = {} # colors
        self.fonts = {} # list of fonts
        self.pages = {} # list of pages
        self.current_page = -1 # active page
        self.win = None # pygame window
self.opened = False
self.resizable = True
self.templates = dict()
self.event_posters = []
pg.init()
def __del__(self):
del self.elements
del self.pages
try:
self.opened = False
self.close()
pg.quit()
except AttributeError:
pass
def open_window(self, width=None, height=None, resizable=None):
"""
Open a new window of size width * height
:param width: width of the window (default 500px)
:param height: height of the window (default 500px)
:param resizable: is the window resizable ? (defaut True)
:type resizable: bool
"""
if width is None:
width = 500
if height is None:
height = 500
if resizable is None:
resizable = self.resizable
if resizable:
self.win = pg.display.set_mode((width, height), RESIZABLE)
else:
self.win = pg.display.set_mode((width, height))
self.resizable = resizable
        # A few resources initialised by default
if not self.opened:
self.new_font('Arial', 30, 'default')
self.new_color('black')
self.new_color('white')
else:
self.refresh()
self.opened = True
def is_open(self):
"""
:return: return if a window already is opened
"""
return self.opened
def close(self):
"""
Close the current window
"""
try:
pg.display.quit()
except AttributeError:
self.opened = False
def callback_close(self):
"""
Set the opened var to False, used in callback to inform the helper
to close the window
"""
self.opened = False
def quit(self):
"""
Close pygame session
"""
self.__del__()
def new_page(self, title, width=None, height=None, label=None, bg=None):
"""
Define a new page. Use the go_to method to navigate between pages.
The open_window method is not required before using this one. If no
window is already opened, open_windo will automatically be called.
:param title: title of the page
:param width: width of the page (default, current width)
:param height: height of the page (default, current height)
:param label: label of the page
:param bg: color_background of the page
:type bg: str or tuple or ColorHelper
:return: label of the page
"""
if not self.is_open():
self.open_window(width, height)
if label is None:
label = len(self.pages)
if bg is None:
            bg = self.new_color('black') # get the label of the black color
elif (isinstance(bg, str) and bg not in self.colors.keys()) or \
isinstance(bg, tuple) or \
isinstance(bg, ColorHelper):
bg = self.new_color(bg)
p_width, p_height = pg.display.get_surface().get_size()
if height is not None or width is not None:
self.open_window(width, height)
if width is None:
width = p_width
if height is None:
height = p_height
self.pages[label] = {
'title': title,
'width': width,
'height': height,
'bg': bg,
'elements': []
}
print "New page created with label '%s'." % label
return label
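    # Hedged usage sketch (the 'home' label and window size are illustrative):
    #   win = WindowHelper.Instance()
    #   home = win.new_page('My game', 800, 600, label='home', bg='black')
    #   win.go_to(home)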
def go_to(self, label):
"""
Change page to the labeled page
:return: label of the new page
"""
self.current_page = label
pg.display.set_caption(py_encode_title(self.pages[label]['title']))
self.reset()
self.print_page(label)
return label
def page_exists(self, page):
"""
Return if the page asked exists
:param page: label of the page
:rtype: bool
"""
return page in self.pages.keys()
def nb_use(self, label, num=1):
"""
        Sets how many times an element can be displayed before it is automatically removed
        param: label of the element
        param: num, the number of times the element can be used
        return: label
        """
        # TODO: Use seconds or a count
self.elements[label]['nb_usable'] = num
return label
def new_color(self, color, label=None, overwrite=True):
"""
Create a new color
:param color: color
:type color: str or tuple or ColorHelper
if the type is str, it has to be a ColorHelper colorname
:param label: label to the color (default, auto set label)
:param overwrite: if True and another color with the same label already exists,
it will overwrite it
:return: label of the new color, False if the label already exists and overwrite is False
"""
if label is None:
if isinstance(color, str) and (color, color) not in self.colors.items():
label = color
else:
label = len(self.colors)
if label in self.colors.keys() and not overwrite:
return False
if isinstance(color, str) or isinstance(color, tuple):
self.colors[label] = ColorHelper(color)
elif isinstance(color, ColorHelper):
self.colors[label] = color
else:
            raise ValueError("The color attribute must be of type ColorHelper, string or tuple.")
return label
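    # Hedged examples of the accepted color forms (assuming 'red' is a valid
    # ColorHelper color name; labels are illustrative):
    #   win.new_color('red')                      # name, label defaults to 'red'
    #   win.new_color((255, 128, 0), 'orange')    # RGB tuple with an explicit label
    #   win.new_color(ColorHelper('white'))       # existing ColorHelper instance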
def new_font(self, family, size, label=None, opt=None, overwrite=False):
"""
Create a new font
:param family: family name of the font (example: Arial)
:param size: size of the font
:param label: label to set te the font (Default, auto set)
:param opt: options for the text (underline, bold, italic, anti-aliasing)
default dict :
{
'anti_aliasing': True,
'bold': False,
'italic': False,
'underline': False
}
:type opt: dict
:param overwrite: if True and another font with the same label already exists,
it will overwrite it
:return: label of the font, False if the label already exists and overwrite is False
"""
if label is None:
label = family + str(size)
if label in self.fonts.keys() and not overwrite:
return False
if opt is None:
opt = {}
elem = {
'family': family,
'size': size,
'font': pg.font.SysFont(family, size),
'anti_aliasing': True,
'bold': False,
'italic': False,
'underline': False
}
elem.update(opt)
        # Update the visual options
if elem['bold']:
elem['font'].set_bold(True)
if elem['italic']:
elem['font'].set_italic(True)
if elem['underline']:
elem['font'].set_underline(True)
self.fonts[label] = elem
return label
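    # Hedged usage sketch (family, sizes and labels are illustrative; the opt keys
    # are those documented above):
    #   title_font = win.new_font('Arial', 48, 'title', opt={'bold': True})
    #   body_font = win.new_font('Arial', 24)   # label defaults to 'Arial24'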
def new_text(self, text, font, color, label=None, add_to_page=None,
overwrite=True): # TODO: Possibilité de mettre un fond au texte
"""
Create a new text
:param text: Text
:param font: label of the font
:type font: str
:param color: label of the color
:param label: label for the new text
:param add_to_page: a new elements is not automatically added to the pages. Set this
parameter to the label of a page and it will be added to the page.
:param overwrite: if True and another text with the same label already exists,
it will overwrite it
:return: label of the text, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
try:
obj = self.fonts[font]['font'].render(py_encode_font_txt(text), self.fonts[font]['anti_aliasing'],
self.colors[color].get_rgb())
except Exception as e:
raise ValueError("Pygame error : %s" % e)
elem = {
'type': 'text',
'color': color,
'font': font,
'content': text,
'obj': obj,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def new_img(self, url, alpha=False, label=None, add_to_page=None, overwrite=False):
"""
Create a new image
:param url: address of the image
:param alpha: if the image uses alpha
:type alpha: bool
:param label: label of the image (default, auto set)
:param add_to_page: label of the page to add on. If None, the element is not added (call add to do so)
:param overwrite: if True and another image with the same label already exists,
it will overwrite it
:return: label of the image, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
if alpha:
# try:
bg = pg.image.load(url).convert_alpha()
# except Exception as e:
# raise ValueError("Can't import image : %s" % e)
# except:
# raise ImportError("The " + url + " image cannot be loaded.")
else:
# try:
bg = pg.image.load(url).convert()
# except Exception as e:
# raise ValueError("Can't import image : %s" % e)
# except:
# raise ImportError("The " + url + " image cannot be loaded.")
elem = {
'type': 'img',
'content': url,
'alpha': alpha,
'obj': bg,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def new_rect(self, color, border, label=None, add_to_page=None, overwrite=True):
"""
Create a new rectangle
:param color: label of the color
:type color: str
:param border: border of the rectangle, if 0 the rectangle is filled
:param label: label of the rectangle (default, auto set)
:param add_to_page: label of the page to add on. If None, the element is not added (call add to do so)
:param overwrite: if True and another image with the same label already exists,
it will overwrite it
:return: label of the rectangle, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
elem = {
'type': 'rect',
'color': color,
'border': border,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def new_circle(self, color, radius, border, label=None, add_to_page=None, overwrite=True):
"""
Create a new circle
:param color: label of the color
:type color: str
:param radius: radius of the circle
:param border: border width of the circle, if 0 the circle is filled
:param label: label of the circle (default, auto set)
:param add_to_page: label of the page to add on. If None, the element is not added (call add to do so)
:param overwrite: if True and another image with the same label already exists,
it will overwrite it
:return: label of the circle, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
elem = {
'type': 'circle',
'color': color,
'radius': radius,
'border': border,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def new_fill(self, color, label=None, add_to_page=None, overwrite=True):
"""
prepare to fill a window, call add to add the fill to the page
:param color: label of the color
:type color: str
:param label: label of the fill (default, auto set)
:param add_to_page: label of the page to add on. If None, the element is not added (call add to do so)
:param overwrite: if True and another image with the same label already exists,
it will overwrite it
:return: label of the fill, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
elem = {
'type': 'fill',
'color': color,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def new_sound(self, url, label=None, add_to_page=None, overwrite=False):
"""
Create a new sound
:param url: address to the sound
:param label: label of the sound (default, auto set)
:param add_to_page: label of the page to add on. If None, the element is not added (call add to do so)
:param overwrite: if True and another image with the same label already exists,
it will overwrite it
:return: label of the sound, False if the label already exists and overwrite is False
"""
if label is None:
label = self.get_unused_label()
if label in self.elements.keys() and not overwrite:
return False
        # Try to load the sound
sound = pg.mixer.Sound(url)
if sound.get_length() < 0.001:
raise ValueError("The " + url + " sound cannot be loaded.")
elem = {
'type': 'sound',
'url': url,
'obj': sound,
'playing': False,
'nb_usable': -1
}
self.elements[label] = elem
if add_to_page is not None:
self.add(label, page=add_to_page)
return label
def play_sound(self, label):
"""
Play a sound
:param label: label of the sound
"""
# if not self.elements[label]['playing']:
self.elements[label]['obj'].play()
# self.elements[label]['playing'] = True
def stop_sound(self, label):
"""
Stop a sound
:param label: label of the sound
"""
# if self.elements[label]['playing']:
self.elements[label]['obj'].stop()
# self.elements[label]['playing'] = False
def is_mixer_busy(self):
"""
Is the mixer busy ?
:return: True if it is, False otherwise
"""
return pg.mixer.get_busy()
def new_menu(self, choices, label=None, add_to_page=None):
"""
Create a menu
:param choices: the choices of the menu in a list
exemple : ["Option 1", "Option 2"]
To learn more, see the wiki (french) : https://github.com/totorigolo/WiiQuizz/wiki/WindowHelper#ajouter-un-menu
:type choices: list
:param label: label of the menu
:param add_to_page: label of the page to add on. If None, the element is not added (call add_menu to do so)
:return: label of the menu
"""
if label is None:
label = len(self.elements)
elem = {
'type': 'menu',
'choices': choices,
'result': None,
'nb_usable': -1
}
self.elements[label] = elem
        if add_to_page == 'current':
            self.add(label, page=self.current_page)
        elif isinstance(add_to_page, int) or isinstance(add_to_page, str):
            self.add(label, page=add_to_page)
return label
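    # Hedged sketch of a small menu (choice strings and labels are illustrative;
    # the result is read back with get_menu_result() once the menu loop has run):
    #   menu = win.new_menu(['Play', 'Quit'], label='main_menu')
    #   win.add_menu(menu, page='home')
    #   chosen = win.get_menu_result(menu)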
def get_menu_result(self, label):
"""
Return the outcome of a menu.
A result may be found only if an option of the menu has been selected
:param label: label of the menu
:return: Result of the menu
"""
return self.elements[label]['result']
def get_element(self, label):
"""
Return an element
:param label: label of the element
:return: the element
"""
return self.elements[label]
def edit_color(self, label, color):
"""
Edit the color of an element
:param label: label of the element to edit
:param color: new label of the color
:type color: str
:return: True is the element has an color attribute, False otherwise
:rtype: bool
"""
if 'color' in self.elements[label].keys():
if self.elements[label]['type'] == 'text':
font = self.elements[label]['font']
text = self.elements[label]['content']
try:
self.elements[label]['obj'] = self.fonts[font]['font'].render(py_encode_font_txt(text),
self.fonts[font]['anti_aliasing'],
self.colors[color].get_rgb())
except Exception as e:
raise ValueError("The color cannot be changed : %s" % e)
self.elements[label]['color'] = color
return True
return False
def edit_text(self, label, text):
"""
Edit the text of an element
:param label: label of the element
:param text: new text
:return: True if the element has a text attribute, False otherwise
:rtype: bool
"""
if self.elements[label]['type'] == 'text':
font = self.elements[label]['font']
color = self.elements[label]['color']
self.elements[label]['content'] = text
try:
self.elements[label]['obj'] = self.fonts[font]['font'].render(py_encode_font_txt(text),
self.fonts[font]['anti_aliasing'],
self.colors[color].get_rgb())
except Exception as e:
raise ValueError("The text cannot be changed : %s" % e)
return True
return False
def edit_border(self, label, border):
"""
Edit the border of an element
:param label: label of the element
:param border: new border
:return: True if the element has a border attribute, False otherwise
:rtype: bool
"""
if 'border' in self.elements[label].keys():
self.elements[label]['border'] = border
return True
return False
def add(self, label, x='centered', y='centered', page=None):
"""
Add an element to the page
:param label: label of the element to add
:param x: x coordinate. Can be an int, 'centered' or a string using math and keywords : ('top', 'bottom', 'left'
('right', 'x_center', 'y_center', 'self_width' and 'self_height'). See the wiki (in french) to learn more
https://github.com/totorigolo/WiiQuizz/wiki/WindowHelper#ajouter-des-%C3%A9l%C3%A9ments-%C3%A0-une-page
:param y: y coordinate.
:param page: label of the page to add, if None or 'current',
it will be added to the current page (if it exists)
:return: True if element added, False otherwise
"""
if page is None or page == 'current':
if self.current_page == -1:
return False
page = self.current_page
if label not in self.elements.keys():
return False
elem = {
'label': label,
'x': x,
'y': y,
'visible': True,
            'nb_recursion': -1  # infinite recursion
}
self.pages[page]['elements'].append(elem)
return True
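    # Hedged positioning examples using the keyword strings described above
    # (element and page labels are illustrative):
    #   win.add('logo', x='centered', y='top + 10', page='home')
    #   win.add('score', x='right - self_width - 20', y='bottom - self_height - 20', page='home')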
def add_menu(self, label, x='centered', y='centered', before_fun=None, after_fun=None, opt=None, vars=None,
page=None):
"""
Add a menu to a page
:param label: label of the menu
:param x: x coordinate. Can be an int, 'centered' or a string using math and keywords : ('top', 'bottom', 'left'
('right', 'x_center', 'y_center', 'self_width' and 'self_height'). See the wiki (in french) to learn more
https://github.com/totorigolo/WiiQuizz/wiki/WindowHelper#ajouter-des-%C3%A9l%C3%A9ments-%C3%A0-une-page
:param y: y coordinate.
:param before_fun: callback function called at the begining of each loop.
This function must take 4 params : pg (a pygame instance), win (a windowHelper instance),
vars (the vars given in the vars param of this method) and menu (the choices param)
:param after_fun: callback function called at the end of each loop
This function must take 4 params : pg (a pygame instance), win (a windowHelper instance),
vars (the vars given in the vars param of this method) and menu (the choices param)
:param opt: Options on the text. Default options :
{
"font": "default", label of the font for the options
"color": "white", label of the color fot the options
"border": None, if the text has a border
"font_active": "default", label of the font for an active option
"color_active": "white", label of the color for an active option
"border_active": None, border of the text for an active option
"margin": 20 margin between options
}
:param vars: vars to pass on callback functions
:param page: label of the page to add on
:return: True if the menu has been added, False otherwise
Get the result of the menu by calling the get_menu_result method
"""
if page is None:
if self.current_page == -1:
return False
page = self.current_page
if label not in self.elements.keys():
return False
if opt is None:
opt = {}
if vars is None:
vars = {}
elem = {
'label': label,
'x': x,
'y': y,
'visible': True,
'nb_recursion': 1,
'before_fun': before_fun,
'after_fun': after_fun,
'vars': vars,
'opt': opt
}
self.pages[page]['elements'].append(elem)
return True
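    # Hedged callback sketch: before_fun/after_fun are called on every loop pass
    # with (pg, win, vars, menu); the frame counter below is purely illustrative.
    #   def tick(pg, win, vars, menu):
    #       vars['frames'] = vars.get('frames', 0) + 1
    #   win.add_menu('main_menu', after_fun=tick, vars={'frames': 0}, page='home')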
def remove(self, label, page=None):
"""
Delete the first instance of an element added to a given page.
:param label: label of the element
:param page: label of the page
:return: True if deleted, False otherwise
"""
        # TODO: Write tests
        # TODO: If it is a sound, stop it
if page is None:
page = self.current_page
def delete_from_page_elements(self, page, label):
try:
for elem_info in self.pages[page]['elements']:
try:
if elem_info['label'] == label:
self.pages[page]['elements'].remove(elem_info)
return True
except KeyError:
print "remove() : problem when deleting element."
except KeyError:
print "remove() : page %s doesn't exist." % page
return False
return delete_from_page_elements(self, page, label)
def destroy(self, label, page=None):
"""
Delete the first instance of an element added to a given page, and from win.elements
:param label: label of the element
:param page: label of the page
:return: True if deleted, False otherwise
"""
        # TODO: Write tests
        # TODO: If it is a sound, stop it
if page is None:
page = self.current_page
def delete_from_page_elements(self, page, label):
try:
for elem_info in self.pages[page]['elements']:
try:
if elem_info['label'] == label:
self.pages[page]['elements'].remove(elem_info)
return True
except KeyError:
print "destroy(%s, %s) : problem when deleting element." % (label, page)
except KeyError:
print "destroy(%s, %s) : page %s doesn't exist." % (page, label, page)
return False
def delete_from_elements(self, label):
if label in self.elements:
self.elements.pop(label)
return True
print "destroy() : element %s not in win.elements." % label
return False
r1 = delete_from_elements(self, label)
r2 = delete_from_page_elements(self, page, label)
return r1 and r2
def print_page(self, page=None):
"""
Print a page
:param page: label of a page
:return: True if the page has been printed, False otherwise
"""
if page is None:
page = self.current_page
if page not in self.pages:
return False
num = 0
while num < len(self.pages[page]['elements']):
num = self.print_elem(num, page)
if self.is_open():
pg.display.flip()
return True
def print_elem(self, num, page=None):
"""
Print a single element
:param num: number of the element
:param page: label of the page
:return: True if the element has been printed, False otherwise
"""
if page is None:
page = self.current_page
elem_info = self.pages[page]['elements'][num]
if not self.exists(elem_info['label']):
return num + 1
elem = self.elements[elem_info['label']]
        if elem_info['visible'] and elem['nb_usable'] != 0:  # If the element is visible
            if elem['nb_usable'] != -1:
                elem['nb_usable'] -= 1
            if elem_info['nb_recursion'] != 0:  # finite recursion count, or infinite (-1)
                if elem_info['nb_recursion'] > 0:
                    self.pages[page]['elements'][num]['nb_recursion'] -= 1
                if elem['type'] == 'rect':  # If a rectangle must be displayed
                    self._print_rect(num, page)
                elif elem['type'] == 'circle':  # If a circle must be displayed
                    self._print_circle(num, page)
                elif elem['type'] == 'fill':
                    self.fill(elem['color'])
                elif elem['type'] == 'menu':  # If a menu must be displayed
                    self._print_menu(num, page)
                else:  # Any other resource to display
p_width, p_height = pg.display.get_surface().get_size()
changes = {
'top': "0",
'left': "0",
'right': str(p_width),
'bottom': str(p_height),
'x_center': str(p_width / 2),
'y_center': str(p_height / 2),
'self_width': str(elem['obj'].get_rect().width),
'self_height': str(elem['obj'].get_rect().height)
}
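                    # The x/y strings may be arithmetic expressions over the keywords
                    # above; each keyword is substituted and the string is then eval'd.
                    # Illustrative examples:
                    #   x = "x_center - self_width / 2"   -> element horizontally centered
                    #   y = "bottom - self_height - 10"   -> 10 px above the bottom edge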
x = str(elem_info['x'])
y = str(elem_info['y'])
if x == 'centered':
x = str((p_width - elem['obj'].get_rect().width) / 2)
if y == 'centered':
y = str((p_height - elem['obj'].get_rect().height) / 2)
for k, v in changes.items():
x = x.replace(k, v)
y = y.replace(k, v)
x = eval(x)
y = eval(y)
self.win.blit(elem['obj'], (x, y))
if elem_info['nb_recursion'] > 0:
self.pages[page]['elements'][num]['nb_recursion'] += 1
if elem['nb_usable'] == 0:
del self.elements[elem_info['label']]
del self.pages[page]['elements'][num]
num += 1
return num + 1
def _print_rect(self, num, page=None):
"""
        Display a rectangle
"""
if page is None:
page = self.current_page
elem_info = self.pages[page]['elements'][num]
        if elem_info['visible']:  # If the element is visible
elem = self.elements[elem_info['label']]
p_width, p_height = pg.display.get_surface().get_size()
x = elem_info['x']
y = elem_info['y']
            if not isinstance(x, list) or not isinstance(y, list):
                raise TypeError("x and y must each be a list of two coordinates for a rect element")
x1, x2, y1, y2 = str(x[0]), str(x[1]), str(y[0]), str(y[1])
changes = {
'top': "0",
'left': "0",
'right': str(p_width),
'bottom': str(p_height),
'x_center': str(p_width / 2),
'y_center': str(p_height / 2)
}
for k, v in changes.items():
x1 = x1.replace(k, v)
x2 = x2.replace(k, v)
y1 = y1.replace(k, v)
y2 = y2.replace(k, v)
x1 = eval(x1)
x2 = eval(x2)
y1 = eval(y1)
y2 = eval(y2)
if isinstance(elem['color'], tuple):
color = elem['color']
else:
color = self.colors[elem['color']].get_rgb()
pg.draw.rect(self.win, color, [x1, y1, x2, y2], elem['border'])
def _print_menu(self, num, page=None):
"""
        Display a menu
"""
if page is None:
page = self.current_page
elem_info = self.pages[page]['elements'][num]
elem = self.elements[elem_info['label']]
menu = elem['choices']
opt = elem_info['opt']
options = {
"font": "default",
"color": "white",
"border": None,
"color_active": "white",
"border_active": None,
"font_active": "default",
"margin": 20
}
        options.update(opt)  # display options
vars = elem_info['vars']
elem_x, elem_y = elem_info['x'], elem_info['y']
before_fun, after_fun = elem_info['before_fun'], elem_info['after_fun']
width_win, height_win = pg.display.get_surface().get_size()
choix = 0
done = False
pressed = False
clock = pg.time.Clock()
while not done:
x, y = elem_x, elem_y
            clock.tick(10)  # Only loop 10 times per second
if before_fun is not None:
vars.update(before_fun(pg, self, vars, menu))
            # Event loop
for ep in self.event_posters:
ep.post_events()
if 'event_poster' in opt:
opt['event_poster'].post_events()
for event in pg.event.get():
if event.type == QUIT:
done = True
self.callback_close()
elif event.type == KEYDOWN:
if event.key == K_RETURN or event.key == K_KP_ENTER:
done = True
pressed = True
elif event.key == K_UP:
choix -= 1
elif event.key == K_DOWN:
choix += 1
                elif event.type == USEREVENT:  # TODO: Corresponds to the Wiimotes (rename USEREVENT)
if event.wiimote_id == 'master' and event.pressed:
if event.btn == 'HAUT':
choix -= 1
elif event.btn == 'BAS':
choix += 1
elif event.btn == 'A':
done = True
pressed = True
elif event.type == VIDEORESIZE:
width_win, height_win = event.w, event.h
                    self.open_window(width_win, height_win)  # Resize the window
choix %= len(menu)
k = 0
if after_fun is not None:
vars.update(after_fun(pg, self, vars, menu))
            self.refresh()  # Refresh the page
for i, m in enumerate(menu):
if isinstance(m, list):
text = m[0]
callback = m[1]
if pressed and choix == i and isinstance(callback, str):
if callback.lower() == 'close':
callback = 'callback_close'
callback = "self." + callback + "("
for j in range(2, len(m)):
callback += str(m[j])
if j != len(m) - 1:
callback += ", "
callback += ")"
eval(callback)
elif pressed and choix == i and isfunction(callback):
params = "("
for j in range(2, len(m)):
params += str(m[j])
if j != len(m) - 1:
params += ", "
params += ")"
callback(eval(params))
elif isinstance(m, list):
text = m[0]
else:
text = m
if not done and self.is_open():
if choix == k:
if options["border_active"] is not None:
txt = self.fonts[options["font_active"]]['font'].render(py_encode_font_txt(text),
self.fonts[options["font_active"]][
'anti_aliasing'],
self.colors[options[
"color_active"]].get_rgb(),
self.colors[options[
"border_active"]].get_rgb())
else:
txt = self.fonts[options["font_active"]]['font'].render(py_encode_font_txt(text),
self.fonts[options["font_active"]][
'anti_aliasing'],
self.colors[options[
"color_active"]].get_rgb())
else:
if options["border"] is not None:
txt = self.fonts[options["font"]]['font'].render(py_encode_font_txt(text),
self.fonts[options["font_active"]][
'anti_aliasing'],
self.colors[options["color"]].get_rgb(),
self.colors[options["border"]].get_rgb())
else:
txt = self.fonts[options["font"]]['font'].render(py_encode_font_txt(text),
self.fonts[options["font_active"]][
'anti_aliasing'],
self.colors[options["color"]].get_rgb())
if elem_x == "centered":
x = (width_win - txt.get_rect().width) / 2
if y == "centered":
                        y = 0  # TODO: add auto-centering for y
if k == 0:
if 'x' in vars.keys():
x = vars['x']
if 'y' in vars.keys():
y = vars['y']
self.win.blit(txt, (x, y))
y += txt.get_rect().height + options["margin"]
k += 1
if self.is_open():
pg.display.flip()
if not self.is_open():
self.close()
self.elements[elem_info['label']]['result'] = choix
def _print_circle(self, num, page=None):
"""
        Display a circle
"""
if page is None:
page = self.current_page
elem_info = self.pages[page]['elements'][num]
        if elem_info['visible']:  # If the element is visible
elem = self.elements[elem_info['label']]
p_width, p_height = pg.display.get_surface().get_size()
x = str(elem_info['x'])
y = str(elem_info['y'])
changes = {
'top': "0",
'left': "0",
'right': str(p_width),
'bottom': str(p_height),
'x_center': str(p_width / 2),
'y_center': str(p_height / 2)
}
radius = elem['radius']
if x == 'centered':
x = str(p_width - elem['radius'])
if y == 'centered':
y = str(p_height - elem['radius'])
for k, v in changes.items():
x = x.replace(k, v)
y = y.replace(k, v)
x = eval(x)
y = eval(y)
color = self.colors[elem['color']].get_rgb()
pg.draw.circle(self.win, color, [x, y], radius, elem['border'])
def event(self, before_fun=None, event_fun=None, after_fun=None, vars=None, fps=10):
"""
Start an event
        :param before_fun: callback function called at the beginning of each loop.
This function must take 3 params : pg (a pygame instance), win (a windowHelper instance),
vars (the vars given in the vars param of this method)
:param event_fun: callback function called in the event for loop.
This function must take 4 params : pg (a pygame instance), win (a windowHelper instance),
vars (the vars given in the vars param of this method) and event (the event)
        :param after_fun: callback function called at the end of each loop with the same params as before_fun
        :param vars: vars to pass to the callback functions
        :param fps: number of loops per second
"""
if vars is None:
vars = {}
done = False
clock = pg.time.Clock()
while not done:
            clock.tick(fps)  # Cap the framerate
if before_fun is not None:
done = done or before_fun(pg, self, vars)
for ep in self.event_posters:
ep.post_events()
if 'event_poster' in vars:
vars['event_poster'].post_events()
for event in pg.event.get():
if event.type == QUIT:
done = True
self.callback_close()
elif event.type == VIDEORESIZE:
self.open_window(event.w, event.h)
if event_fun is not None:
done = done or event_fun(pg, self, vars, event)
if after_fun is not None:
done = done or after_fun(pg, self, vars)
if self.is_open():
pg.display.flip()
if not self.is_open():
break
if not self.is_open():
self.close()
def register_event_poster(self, event_poster):
"""
        Register an Event Poster, i.e. an object that will post events into pygame.event
        :param event_poster: an object exposing a post_events() method
"""
self.event_posters.append(event_poster)
print "Event poster added : %s" % event_poster
def remove_event_poster(self, event_poster):
"""
        Remove an Event Poster, i.e. an object that posts events into pygame.event
        :param event_poster: an object exposing a post_events() method
"""
try:
self.event_posters.remove(event_poster)
print "Event poster removed : %s" % event_poster
except ValueError:
print "Event poster absent from list : %s" % event_poster
def reset(self):
"""
Reset the current page
"""
try:
color = self.colors[self.pages[self.current_page]['bg']]
except KeyError:
            # TODO: Use a custom error
print "Can't reset(), either no current page or no background color."
else:
self.win.fill(color.get_rgb())
def refresh(self):
"""
Refresh the current page
"""
self.reset()
self.print_page()
def dump_elements(self, page=None, destroy=False):
"""
Dump all elements of a given page
:param page: label of the page
:param destroy: delete the element from win.elements
:return: None if the page does not exist
"""
if page is None:
page = self.current_page
if page not in self.pages:
print "dump_elements() : la page %s n'existe pas" % page
return
try:
for label in [e['label'] for e in self.pages[page]['elements']]:
if not destroy:
r = self.remove(label, page)
# print "Element %s deleted : %s" % (label, r)
else:
r = self.destroy(label, page)
# print "Element %s destroyed : %s" % (label, r)
except KeyError:
            print 'dump_elements() : page %s is invalid' % page
def delete_page(self, page=None, destroy=False):
"""
Delete a page (along with the elements on that page)
:param page: label of a page
:return: True if the page is deleted, False otherwise
"""
if page is None:
page = self.current_page
if page == self.current_page:
self.current_page = -1
if page not in self.pages:
print "delete_page() : the page %s doesn't exists" % page
return False
self.dump_elements(page, destroy=destroy)
self.pages.pop(page)
print "Page %s deleted." % page
return True
def fill(self, color):
"""
Fill the current page
:param color: label of the color
:type color: str
"""
self.win.fill(self.colors[color].get_rgb())
def exists(self, label):
"""
        Return whether a given element exists
:param label: label of the element
:rtype: bool
"""
return label in self.elements.keys()
def parse_template(self, name, lines, file=None, opt=None):
"""
Parse lines of skt file
:param name: name of the template
:param lines: lines to parse
:param file: filename of the element
:param opt: options
:return:
"""
if opt is None:
opt = {}
options = {}
options.update(opt)
mode = None
page = {
'title': None,
'label': None,
'width': None,
'height': None,
'bg': None
}
""" Supprime le template s'il existe déjà """
self.undo_template(name)
elements = {'colors_and_fonts': {}, 'def': {}, 'placing': []}
""" Récupération des éléments du fichier """
for line in lines:
line = line.strip()
line = line.replace('\n', '')
if len(line) >= 2 and line[0] != '/' and line[1] != '/':
if re.match(r'#def', line) is not None:
mode = 'def'
elif re.match(r'#placing', line) is not None:
mode = 'placing'
else:
                possible_bg = re.findall("#bg\s*\:\s*(\w+)", line)  # Get the bg
                possible_page = re.findall("#page\s*\:\s*(\w+)\(?(\d*)?x?(\d*)?\)?", line)  # Get the page
                possible_titre = re.findall("#title\s*\:\s*([\w\s]+)", line)  # Get the title
                possible_def = re.findall(
                    "(text|rect|img|circle|font|color)\s*:\s*(\w+)\((.*)\)\s*(\"([\w\d\s]*)\")?\s*",
                    line)  # Get the definitions
                possible_placing = re.findall("(\w+)\((.*)\)", line)  # Get the element placements
                # Page parameters (#page)
if mode is None and len(possible_page) == 1:
if isinstance(possible_page[0], tuple):
page['label'], page['width'], page['height'] = possible_page[0]
page['width'] = int(page['width'])
page['height'] = int(page['height'])
else:
page['label'] = possible_page[0]
                    page['label'] = page['label'].replace(' ', '')
# #bg
elif mode is None and len(possible_bg) == 1:
page['bg'] = possible_bg[0]
# #title
elif mode is None and len(possible_titre) == 1:
page['title'] = possible_titre[0].replace('\n', '')
# #def
elif mode == 'def' and len(possible_def) > 0:
if len(possible_def[0]) == 3:
type, label, params = possible_def[0]
content = None
elif len(possible_def[0]) == 5:
type, label, params, c, content = possible_def[0]
                        # Get the elements between quotes
first_comma = None
last_comma = None
after_comma = None
for k in range(len(params)):
if params[k] == '"' and first_comma is None:
first_comma = k
elif params[k] == '"':
last_comma = k
if after_comma is None and last_comma is not None and params[k] == ',':
after_comma = k
if first_comma is not None:
content = params[first_comma + 1:last_comma]
params = params[0:first_comma] + params[after_comma + 1:]
                        params = params.replace(' ', '')  # Remove spaces
                        params = params.split(',')  # Split on ','
for k in range(len(params)):
params[k] = params[k].strip()
if type == 'color' or type == 'font':
sub_mode = 'colors_and_fonts'
else:
sub_mode = mode
elements[sub_mode][label] = {
'type': type,
'params': params,
'content': content
}
# #placing
elif mode == 'placing' and len(possible_placing) > 0:
label, params = possible_placing[0]
                    params = params.replace(' ', '')
params = params.split(',')
for k in range(len(params)):
params[k] = params[k].strip()
elements[mode].append({
'label': label,
'params': params
})
""" Parcourt des éléments et création de la page """
if page['label'] is None:
if self.current_page == -1:
raise ValueError('No active page found. Use the go_to method before importing the template file.')
label_page = self.current_page
else:
label_page = self.new_page(page['title'], page['width'], page['height'], label=page['label'], bg=page['bg'])
if name not in self.templates:
self.templates[name] = {'page': None, 'elements': []}
self.templates[name]['page'] = label_page
        # Add the colors and fonts
for label, elem in elements['colors_and_fonts'].items():
if elem['type'] == 'color':
try:
if len(elem['params']) == 3 and elem['params'][0].isdigit() and elem['params'][1].isdigit() and \
elem['params'][2].isdigit():
self.new_color((int(elem['params'][0]), int(elem['params'][1]), int(elem['params'][2])), label)
elif len(elem['params']) == 1:
self.new_color(int(elem['params'][0]), label)
except Exception as e:
if file is not None:
raise ValueError(
'The options for ' + label + ' in the file ' + file + ' are incorrect. : %s' % e)
else:
raise ValueError('The options for ' + label + ' are incorrect : %s' % e)
elif elem['type'] == 'font':
try:
self.new_font(elem['params'][0], int(elem['params'][1]), label)
except Exception as e:
if file is not None:
raise ValueError(
'The options for ' + label + ' in the file ' + file + ' are incorrect : %s' % e)
else:
raise ValueError('The options for ' + label + ' are incorrect : %s' % e)
        # Add the elements
for label, elem in elements['def'].items():
if elem['type'] == 'text':
self.new_text(elem['content'], elem['params'][0].replace(' ', ''), elem['params'][1].replace(' ', ''),
label, overwrite=True)
elif elem['type'] == 'rect':
self.new_rect(elem['params'][0].replace(' ', ''), int(elem['params'][1]), label, overwrite=True)
elif elem['type'] == 'circle':
self.new_circle(elem['params'][0].replace(' ', ''), int(elem['params'][1]), int(elem['params'][2]),
label, overwrite=True)
elif elem['type'] == 'img':
elem['params'][0] = elem['params'][0].replace('IMG_FOLDER', options['IMG_FOLDER']).replace('\\', '/')
if len(elem['params']) == 2 and elem['params'][1] == 'True':
self.new_img(elem['params'][0], alpha=elem['params'][1], label=label, overwrite=True)
else:
self.new_img(elem['params'][0], label=label, overwrite=True)
        # Add to the page
for info in elements['placing']:
label = info['label']
self.templates[name]['elements'].append(label)
if label not in self.elements:
print "parse_template('%s') : Le label %s n'existe pas." % (name, label)
continue
if self.elements[label]['type'] == 'rect':
if info['params'][0].isdigit():
info['params'][0] = int(info['params'][0])
if info['params'][1].isdigit():
info['params'][1] = int(info['params'][1])
if info['params'][2].isdigit():
info['params'][2] = int(info['params'][2])
if info['params'][3].isdigit():
info['params'][3] = int(info['params'][3])
self.add(label, [info['params'][0], info['params'][2]],
[info['params'][1], info['params'][3]], label_page)
else:
if info['params'][0].isdigit():
info['params'][0] = int(info['params'][0])
if info['params'][1].isdigit():
info['params'][1] = int(info['params'][1])
self.add(label, info['params'][0], info['params'][1], label_page)
def import_template(self, name, filename=None,
                        opt=None):  # TODO: add a page_label parameter to choose which page receives the elements
"""
Import a skt template
:param name: name of the template
:param filename: filename of the template (facultative)
:param opt: options
{
'IMG_FOLDER': project_dir + '/res', directory of images to use
'SKT_FOLDER': project_dir + '/templates' directory of the skt files
}
See the wiki (in french) to learn more about skt files
https://github.com/totorigolo/WiiQuizz/wiki/WindowHelper#les-fichiers-template
"""
project_dir = os.path.abspath('/'.join((os.path.dirname(os.path.abspath(__file__)), '..')))
if opt is None:
opt = {}
options = {
'IMG_FOLDER': project_dir + '/res',
'SKT_FOLDER': project_dir + '/templates'
}
options.update(opt)
if filename is None:
# if re.match('.*\.skt', name) is None:
options['SKT_FOLDER'] = options['SKT_FOLDER'].replace('\\', '/')
filename = options['SKT_FOLDER'] + '/' + name + '.skt'
with open(filename, 'r') as file:
lines = file.readlines()
self.parse_template(name, lines, filename, options)
def undo_template(self, name):
"""
        Remove from the view every element added by the last import_template
        :return: False if the template label doesn't exist, True otherwise
"""
if name not in self.templates:
return False
for elem in self.templates[name]["elements"]:
self.destroy(elem, self.templates[name]["page"])
self.templates.pop(name, None)
def get_unused_label(self):
new_label = len(self.elements)
while new_label in self.elements:
new_label += 1
return new_label
| totorigolo/WiiQuizz | helpers/WindowHelper.py | Python | gpl-2.0 | 57,378 |
import unittest
from utils import str_utils
class TestStrUtils(unittest.TestCase):
def test_equal_number_of_parameters(self):
self.assertEqual(str_utils.split("a b c", " ", 3), ["a", "b", "c"])
def test_more_parameters_than_limit(self):
self.assertEqual(str_utils.split("a b c d", " ", 3), ["a", "b", "c d"])
def test_less_parameters_than_limit(self):
self.assertEqual(str_utils.split("a b", " ", 3), ["a", "b", ""])
| Tigge/platinumshrimp | utils/test/test_str_utils.py | Python | mit | 458 |
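# Hedged sketch (illustrative, not part of the corpus file above): a str_utils.split
# helper consistent with the three tests just shown -- it always returns exactly
# `limit` parts, folding any extra fields into the last part and padding with empty
# strings when there are too few.


def split(text, separator, limit):
    """Split text on separator into exactly `limit` parts."""
    parts = text.split(separator, limit - 1)  # at most `limit` parts; extras stay joined
    parts += [""] * (limit - len(parts))      # pad short results with empty strings
    return parts


if __name__ == "__main__":
    assert split("a b c", " ", 3) == ["a", "b", "c"]
    assert split("a b c d", " ", 3) == ["a", "b", "c d"]
    assert split("a b", " ", 3) == ["a", "b", ""]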
import string
final_buffer = 'list_phoneme <- [\n'
f = open('phonemes.txt', 'r')
buffer = f.read()
f.close()
pos = buffer.find('<Phonemes>')
if pos != -1:
buffer = buffer[pos+10:len(buffer)-13]
buffer = string.split(buffer, ',')
for phoneme_array in buffer:
phoneme = string.split(phoneme_array, ':')
first_time = phoneme[0]
last_time = phoneme[1]
phoneme_type = phoneme[2]
final_buffer += '{\n'
final_buffer += ' first_time =' + first_time + ',\n'
final_buffer += ' last_time =' + last_time + ',\n'
final_buffer += ' phoneme_type =\"' + phoneme_type + '\"\n'
final_buffer += '},\n'
final_buffer += ']'
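# Hedged illustration (values and phoneme names are made up): the script expects a
# phonemes.txt shaped roughly like
#   <Phonemes>0:12:AI,13:25:E,26:40:MBP</Phonemes>
# and the loop above turns each first:last:type triple into a phonemes.nut entry such as
#   list_phoneme <- [
#   {
#       first_time =0,
#       last_time =12,
#       phoneme_type ="AI"
#   },
#   ... remaining entries elided ...
#   ]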
f = open('phonemes.nut', 'w')
f.write(final_buffer)
f.close() | astrofra/amiga-memories | app/phoneme_to_nut.py | Python | mit | 787 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._regexpidbase import RegExpIdBase
#-------------------------------------------------------------------------
#
# HasIdOf
#
#-------------------------------------------------------------------------
class RegExpIdOf(RegExpIdBase):
"""
Rule that checks for an event whose GRAMPS ID
    matches a regular expression.
"""
name = _('Events with Id containing <text>')
description = _("Matches events whose Gramps ID matches "
"the regular expression")
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/event/_regexpidof.py | Python | gpl-2.0 | 1,741 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-17 23:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('laboite.apps.parking', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='appparking',
name='parking',
field=models.CharField(choices=[('HFR', 'Henri Fr\xe9ville'), ('JFK', 'J.F. Kennedy'), ('POT', 'La Poterie'), ('PRE', 'Les Pr\xe9ales'), ('VU', 'Villejean-Universit\xe9')], help_text='Veuillez s\xe9lectionner votre parking', max_length=3),
),
]
| bgaultier/laboitepro | laboite/apps/parking/migrations/0002_auto_20170618_0100.py | Python | agpl-3.0 | 618 |
#-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys
import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
        ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def check_exist(self):
for libname in get_libname(self.platform):
if not filetools.exists(os.path.join(self.dest_path,libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
command = ['su', '-c', 'chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('ROOT Copy Failed!')
else:
command = ['chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando: %s' % str(command))
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
| alfa-jor/addon | plugin.video.alfa/lib/python_libtorrent/python_libtorrent/functions.py | Python | gpl-3.0 | 9,266 |
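# Hedged usage sketch (not part of the file above; the destination folder is
# illustrative): the manager is pointed at the folder that should hold the
# platform-specific libtorrent binaries and only fetches them when missing or stale.
#   platform = get_platform()
#   lm = LibraryManager('/storage/.kodi/userdata/addon_data/plugin.video.alfa/libtorrent', platform)
#   if not lm.check_exist():
#       lm.download()
#   else:
#       lm.update()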
""" Tests for utils. """
import collections
from datetime import datetime, timedelta
import mock
from pytz import UTC
from django.test import TestCase
from django.test.utils import override_settings
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions import UserPartition, Group
from contentstore import utils
from contentstore.tests.utils import CourseTestCase
class LMSLinksTestCase(TestCase):
""" Tests for LMS links. """
def about_page_test(self):
""" Get URL for about page, no marketing site """
# default for ENABLE_MKTG_SITE is False.
self.assertEquals(self.get_about_page_link(), "//localhost:8000/courses/mitX/101/test/about")
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def about_page_marketing_site_test(self):
""" Get URL for about page, marketing root present. """
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertEquals(self.get_about_page_link(), "//dummy-root/courses/mitX/101/test/about")
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
self.assertEquals(self.get_about_page_link(), "//localhost:8000/courses/mitX/101/test/about")
@override_settings(MKTG_URLS={'ROOT': 'http://www.dummy'})
def about_page_marketing_site_remove_http_test(self):
""" Get URL for about page, marketing root present, remove http://. """
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertEquals(self.get_about_page_link(), "//www.dummy/courses/mitX/101/test/about")
@override_settings(MKTG_URLS={'ROOT': 'https://www.dummy'})
def about_page_marketing_site_remove_https_test(self):
""" Get URL for about page, marketing root present, remove https://. """
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertEquals(self.get_about_page_link(), "//www.dummy/courses/mitX/101/test/about")
@override_settings(MKTG_URLS={'ROOT': 'www.dummyhttps://x'})
def about_page_marketing_site_https__edge_test(self):
""" Get URL for about page, only remove https:// at the beginning of the string. """
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertEquals(self.get_about_page_link(), "//www.dummyhttps://x/courses/mitX/101/test/about")
@override_settings(MKTG_URLS={})
def about_page_marketing_urls_not_set_test(self):
""" Error case. ENABLE_MKTG_SITE is True, but there is either no MKTG_URLS, or no MKTG_URLS Root property. """
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
self.assertEquals(self.get_about_page_link(), None)
@override_settings(LMS_BASE=None)
def about_page_no_lms_base_test(self):
""" No LMS_BASE, nor is ENABLE_MKTG_SITE True """
self.assertEquals(self.get_about_page_link(), None)
def get_about_page_link(self):
""" create mock course and return the about page link """
course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
return utils.get_lms_link_for_about_page(course_key)
def lms_link_test(self):
""" Tests get_lms_link_for_item. """
course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
location = course_key.make_usage_key('vertical', 'contacting_us')
link = utils.get_lms_link_for_item(location, False)
self.assertEquals(link, "//localhost:8000/courses/mitX/101/test/jump_to/i4x://mitX/101/vertical/contacting_us")
# test preview
link = utils.get_lms_link_for_item(location, True)
self.assertEquals(
link,
"//preview.localhost/courses/mitX/101/test/jump_to/i4x://mitX/101/vertical/contacting_us"
)
# now test with the course' location
location = course_key.make_usage_key('course', 'test')
link = utils.get_lms_link_for_item(location)
self.assertEquals(link, "//localhost:8000/courses/mitX/101/test/jump_to/i4x://mitX/101/course/test")
class ExtraPanelTabTestCase(TestCase):
""" Tests adding and removing extra course tabs. """
def get_tab_type_dicts(self, tab_types):
""" Returns an array of tab dictionaries. """
if tab_types:
return [{'tab_type': tab_type} for tab_type in tab_types.split(',')]
else:
return []
def get_course_with_tabs(self, tabs=None):
""" Returns a mock course object with a tabs attribute. """
if tabs is None:
tabs = []
course = collections.namedtuple('MockCourse', ['tabs'])
if isinstance(tabs, basestring):
course.tabs = self.get_tab_type_dicts(tabs)
else:
course.tabs = tabs
return course
class XBlockVisibilityTestCase(SharedModuleStoreTestCase):
"""Tests for xblock visibility for students."""
@classmethod
def setUpClass(cls):
super(XBlockVisibilityTestCase, cls).setUpClass()
cls.dummy_user = ModuleStoreEnum.UserID.test
cls.past = datetime(1970, 1, 1, tzinfo=UTC)
cls.future = datetime.now(UTC) + timedelta(days=1)
cls.course = CourseFactory.create()
def test_private_unreleased_xblock(self):
"""Verifies that a private unreleased xblock is not visible"""
self._test_visible_to_students(False, 'private_unreleased', self.future)
def test_private_released_xblock(self):
"""Verifies that a private released xblock is not visible"""
self._test_visible_to_students(False, 'private_released', self.past)
def test_public_unreleased_xblock(self):
"""Verifies that a public (published) unreleased xblock is not visible"""
self._test_visible_to_students(False, 'public_unreleased', self.future, publish=True)
def test_public_released_xblock(self):
"""Verifies that public (published) released xblock is visible if staff lock is not enabled."""
self._test_visible_to_students(True, 'public_released', self.past, publish=True)
def test_private_no_start_xblock(self):
"""Verifies that a private xblock with no start date is not visible"""
self._test_visible_to_students(False, 'private_no_start', None)
def test_public_no_start_xblock(self):
"""Verifies that a public (published) xblock with no start date is visible unless staff lock is enabled"""
self._test_visible_to_students(True, 'public_no_start', None, publish=True)
def test_draft_released_xblock(self):
"""Verifies that a xblock with an unreleased draft and a released published version is visible"""
vertical = self._create_xblock_with_start_date('draft_released', self.past, publish=True)
# Create an unreleased draft version of the xblock
vertical.start = self.future
modulestore().update_item(vertical, self.dummy_user)
self.assertTrue(utils.is_currently_visible_to_students(vertical))
def _test_visible_to_students(self, expected_visible_without_lock, name, start_date, publish=False):
"""
Helper method that checks that is_xblock_visible_to_students returns the correct value both
with and without visible_to_staff_only set.
"""
no_staff_lock = self._create_xblock_with_start_date(name, start_date, publish, visible_to_staff_only=False)
self.assertEqual(expected_visible_without_lock, utils.is_currently_visible_to_students(no_staff_lock))
# any xblock with visible_to_staff_only set to True should not be visible to students.
staff_lock = self._create_xblock_with_start_date(
name + "_locked", start_date, publish, visible_to_staff_only=True
)
self.assertFalse(utils.is_currently_visible_to_students(staff_lock))
def _create_xblock_with_start_date(self, name, start_date, publish=False, visible_to_staff_only=False):
"""Helper to create an xblock with a start date, optionally publishing it"""
vertical = modulestore().create_item(
self.dummy_user, self.course.location.course_key, 'vertical', name,
fields={'start': start_date, 'visible_to_staff_only': visible_to_staff_only}
)
if publish:
modulestore().publish(vertical.location, self.dummy_user)
return vertical
class ReleaseDateSourceTest(CourseTestCase):
"""Tests for finding the source of an xblock's release date."""
def setUp(self):
super(ReleaseDateSourceTest, self).setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
self.date_one = datetime(1980, 1, 1, tzinfo=UTC)
self.date_two = datetime(2020, 1, 1, tzinfo=UTC)
def _update_release_dates(self, chapter_start, sequential_start, vertical_start):
"""Sets the release dates of the chapter, sequential, and vertical"""
self.chapter.start = chapter_start
self.chapter = self.store.update_item(self.chapter, ModuleStoreEnum.UserID.test)
self.sequential.start = sequential_start
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
self.vertical.start = vertical_start
self.vertical = self.store.update_item(self.vertical, ModuleStoreEnum.UserID.test)
def _verify_release_date_source(self, item, expected_source):
"""Helper to verify that the release date source of a given item matches the expected source"""
source = utils.find_release_date_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertEqual(source.start, expected_source.start)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's release date being set by its sequential"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.vertical, self.sequential)
def test_chapter_source_for_sequential(self):
"""Tests a sequential's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.sequential, self.chapter)
def test_sequential_source_for_sequential(self):
"""Tests a sequential's release date being set by itself"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.sequential, self.sequential)
class StaffLockTest(CourseTestCase):
"""Base class for testing staff lock functions."""
def setUp(self):
super(StaffLockTest, self).setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
self.orphan = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
# Orphan the orphaned xblock
self.sequential.children = [self.vertical.location]
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
def _set_staff_lock(self, xblock, is_locked):
"""If is_locked is True, xblock is staff locked. Otherwise, the xblock staff lock field is removed."""
field = xblock.fields['visible_to_staff_only']
if is_locked:
field.write_to(xblock, True)
else:
field.delete_from(xblock)
return self.store.update_item(xblock, ModuleStoreEnum.UserID.test)
def _update_staff_locks(self, chapter_locked, sequential_locked, vertical_locked):
"""
Sets the staff lock on the chapter, sequential, and vertical
If the corresponding argument is False, then the field is deleted from the xblock
"""
self.chapter = self._set_staff_lock(self.chapter, chapter_locked)
self.sequential = self._set_staff_lock(self.sequential, sequential_locked)
self.vertical = self._set_staff_lock(self.vertical, vertical_locked)
class StaffLockSourceTest(StaffLockTest):
"""Tests for finding the source of an xblock's staff lock."""
def _verify_staff_lock_source(self, item, expected_source):
"""Helper to verify that the staff lock source of a given item matches the expected source"""
source = utils.find_staff_lock_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertTrue(source.visible_to_staff_only)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its chapter"""
self._update_staff_locks(True, False, False)
self._verify_staff_lock_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its sequential"""
self._update_staff_locks(True, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
self._update_staff_locks(False, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
def test_vertical_source_for_vertical(self):
"""Tests a vertical's staff lock being set by itself"""
self._update_staff_locks(True, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, False, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
def test_orphan_has_no_source(self):
"""Tests that a orphaned xblock has no staff lock source"""
self.assertIsNone(utils.find_staff_lock_source(self.orphan))
def test_no_source_for_vertical(self):
"""Tests a vertical with no staff lock set anywhere"""
self._update_staff_locks(False, False, False)
self.assertIsNone(utils.find_staff_lock_source(self.vertical))
class InheritedStaffLockTest(StaffLockTest):
"""Tests for determining if an xblock inherits a staff lock."""
def test_no_inheritance(self):
"""Tests that a locked or unlocked vertical with no locked ancestors does not have an inherited lock"""
self._update_staff_locks(False, False, False)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, False, True)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_section(self):
"""Tests that a locked or unlocked vertical in a locked section has an inherited lock"""
self._update_staff_locks(True, False, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(True, False, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_subsection(self):
"""Tests that a locked or unlocked vertical in a locked subsection has an inherited lock"""
self._update_staff_locks(False, True, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, True, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_no_inheritance_for_orphan(self):
"""Tests that an orphaned xblock does not inherit staff lock"""
self.assertFalse(utils.ancestor_has_staff_lock(self.orphan))
class GroupVisibilityTest(CourseTestCase):
"""
Test content group access rules.
"""
def setUp(self):
super(GroupVisibilityTest, self).setUp()
chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
html = ItemFactory.create(category='html', parent_location=vertical.location)
problem = ItemFactory.create(
category='problem', parent_location=vertical.location, data="<problem></problem>"
)
self.sequential = self.store.get_item(sequential.location)
self.vertical = self.store.get_item(vertical.location)
self.html = self.store.get_item(html.location)
self.problem = self.store.get_item(problem.location)
# Add partitions to the course
self.course.user_partitions = [
UserPartition(
id=0,
name="Partition 0",
description="Partition 0",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Partition 1",
description="Partition 1",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group C"),
Group(id=1, name="Group D"),
],
),
UserPartition(
id=2,
name="Partition 2",
description="Partition 2",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group E"),
Group(id=1, name="Group F"),
Group(id=2, name="Group G"),
Group(id=3, name="Group H"),
],
),
]
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def set_group_access(self, xblock, value):
""" Sets group_access to specified value and calls update_item to persist the change. """
xblock.group_access = value
self.store.update_item(xblock, self.user.id)
def test_no_visibility_set(self):
""" Tests when group_access has not been set on anything. """
def verify_all_components_visible_to_all(): # pylint: disable=invalid-name
""" Verifies when group_access has not been set on anything. """
for item in (self.sequential, self.vertical, self.html, self.problem):
self.assertFalse(utils.has_children_visible_to_specific_content_groups(item))
self.assertFalse(utils.is_visible_to_specific_content_groups(item))
verify_all_components_visible_to_all()
# Test with group_access set to Falsey values.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.html, {2: None})
verify_all_components_visible_to_all()
def test_sequential_and_problem_have_group_access(self):
""" Tests when group_access is set on a few different components. """
self.set_group_access(self.sequential, {1: [0]})
# This is a no-op.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.problem, {2: [3, 4]})
# Note that "has_children_visible_to_specific_content_groups" only checks immediate children.
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.sequential))
self.assertTrue(utils.has_children_visible_to_specific_content_groups(self.vertical))
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.html))
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.problem))
self.assertTrue(utils.is_visible_to_specific_content_groups(self.sequential))
self.assertFalse(utils.is_visible_to_specific_content_groups(self.vertical))
self.assertFalse(utils.is_visible_to_specific_content_groups(self.html))
self.assertTrue(utils.is_visible_to_specific_content_groups(self.problem))
class GetUserPartitionInfoTest(ModuleStoreTestCase):
"""
Tests for utility function that retrieves user partition info
and formats it for consumption by the editing UI.
"""
def setUp(self):
"""Create a dummy course. """
super(GetUserPartitionInfoTest, self).setUp()
self.course = CourseFactory()
self.block = ItemFactory.create(category="problem", parent_location=self.course.location) # pylint: disable=no-member
# Set up some default partitions
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Random user partition",
scheme=UserPartition.get_scheme("random"),
description="Random user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
def test_retrieves_partition_info_with_selected_groups(self):
# Initially, no group access is set on the block, so no groups should
# be marked as selected.
expected = [
{
"id": 0,
"name": u"Cohort user partition",
"scheme": u"cohort",
"groups": [
{
"id": 0,
"name": u"Group A",
"selected": False,
"deleted": False,
},
{
"id": 1,
"name": u"Group B",
"selected": False,
"deleted": False,
},
]
},
{
"id": 1,
"name": u"Random user partition",
"scheme": u"random",
"groups": [
{
"id": 0,
"name": u"Group C",
"selected": False,
"deleted": False,
},
]
}
]
self.assertEqual(self._get_partition_info(), expected)
# Update group access and expect that now one group is marked as selected.
self._set_group_access({0: [1]})
expected[0]["groups"][1]["selected"] = True
self.assertEqual(self._get_partition_info(), expected)
def test_deleted_groups(self):
# Select a group that is not defined in the partition
self._set_group_access({0: [3]})
# Expect that the group appears as selected but is marked as deleted
partitions = self._get_partition_info()
groups = partitions[0]["groups"]
self.assertEqual(len(groups), 3)
self.assertEqual(groups[2], {
"id": 3,
"name": "Deleted group",
"selected": True,
"deleted": True
})
def test_filter_by_partition_scheme(self):
partitions = self._get_partition_info(schemes=["random"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "random")
def test_exclude_inactive_partitions(self):
# Include an inactive verification scheme
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Verification user partition",
scheme=UserPartition.get_scheme("verification"),
description="Verification user partition",
groups=[
Group(id=0, name="Group C"),
],
active=False,
),
])
# Expect that the inactive scheme is excluded from the results
partitions = self._get_partition_info()
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "cohort")
def test_exclude_partitions_with_no_groups(self):
# The cohort partition has no groups defined
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[],
),
UserPartition(
id=1,
name="Verification user partition",
scheme=UserPartition.get_scheme("verification"),
description="Verification user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
# Expect that the partition with no groups is excluded from the results
partitions = self._get_partition_info()
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "verification")
def _set_partitions(self, partitions):
"""Set the user partitions of the course descriptor. """
self.course.user_partitions = partitions
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def _set_group_access(self, group_access):
"""Set group access of the block. """
self.block.group_access = group_access
self.block = self.store.update_item(self.block, ModuleStoreEnum.UserID.test)
def _get_partition_info(self, schemes=None):
"""Retrieve partition info and selected groups. """
return utils.get_user_partition_info(self.block, schemes=schemes)
| waheedahmed/edx-platform | cms/djangoapps/contentstore/tests/test_utils.py | Python | agpl-3.0 | 27,659 |
#!/usr/bin/env python2
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# Check needed software dependencies to nudge users to fix their setup
from __future__ import with_statement
import time
import signal
import sys
import shutil
import subprocess
import traceback
if sys.version_info < (2, 6):
print "Sorry, requires Python 2.6 or 2.7."
sys.exit(1)
try:
import Cheetah
if Cheetah.Version[0] != '2':
raise ValueError
except ValueError:
print "Sorry, requires Python module Cheetah 2.1.0 or newer."
sys.exit(1)
except:
print "The Python module Cheetah is required"
sys.exit(1)
import os
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), 'lib')))
# We only need this for compiling an EXE and I will just always do that on 2.6+
if sys.hexversion >= 0x020600F0:
from multiprocessing import freeze_support # @UnresolvedImport
import locale
import datetime
import threading
import getopt
import sickbeard
from sickbeard import db, logger, network_timezones, failed_history, name_cache
from sickbeard.tv import TVShow
from sickbeard.webserveInit import SRWebServer
from sickbeard.databases.mainDB import MIN_DB_VERSION, MAX_DB_VERSION
from sickbeard.event_queue import Events
from lib.configobj import ConfigObj
throwaway = datetime.datetime.strptime('20110101', '%Y%m%d')
signal.signal(signal.SIGINT, sickbeard.sig_handler)
signal.signal(signal.SIGTERM, sickbeard.sig_handler)
class SickRage(object):
def __init__(self):
# system event callback for shutdown/restart
sickbeard.events = Events(self.shutdown)
# daemon constants
self.runAsDaemon = False
self.CREATEPID = False
self.PIDFILE = ''
# webserver constants
self.webserver = None
self.forceUpdate = False
self.forcedPort = None
self.noLaunch = False
def help_message(self):
"""
print help message for commandline options
"""
help_msg = "\n"
help_msg += "Usage: " + sickbeard.MY_FULLNAME + " <option> <another option>\n"
help_msg += "\n"
help_msg += "Options:\n"
help_msg += "\n"
help_msg += " -h --help Prints this message\n"
help_msg += " -f --forceupdate Force update all shows in the DB (from tvdb) on startup\n"
help_msg += " -q --quiet Disables logging to console\n"
help_msg += " --nolaunch Suppress launching web browser on startup\n"
if sys.platform == 'win32':
help_msg += " -d --daemon Running as real daemon is not supported on Windows\n"
help_msg += " On Windows, --daemon is substituted with: --quiet --nolaunch\n"
else:
help_msg += " -d --daemon Run as double forked daemon (includes options --quiet --nolaunch)\n"
help_msg += " --pidfile=<path> Combined with --daemon creates a pidfile (full path including filename)\n"
help_msg += " -p <port> --port=<port> Override default/configured port to listen on\n"
help_msg += " --datadir=<path> Override folder (full path) as location for\n"
help_msg += " storing database, configfile, cache, logfiles \n"
help_msg += " Default: " + sickbeard.PROG_DIR + "\n"
help_msg += " --config=<path> Override config filename (full path including filename)\n"
help_msg += " to load configuration from \n"
help_msg += " Default: config.ini in " + sickbeard.PROG_DIR + " or --datadir location\n"
help_msg += " --noresize Prevent resizing of the banner/posters even if PIL is installed\n"
return help_msg
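        # Illustrative invocations built from the options documented above
        # (paths and port are hypothetical):
        #   python SickBeard.py -q --nolaunch -p 8081
        #   python SickBeard.py --daemon --pidfile=/var/run/sickrage.pid --datadir=/opt/sickrage/data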
def start(self):
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING)
except:
sys.exit("Sorry, you MUST add the SickRage folder to the PYTHONPATH environment variable\n" +
"or find another way to force Python to use " + sickbeard.SYS_ENCODING + " for string encoding.")
# Need console logging for SickBeard.py and SickBeard-console.exe
self.consoleLogging = (not hasattr(sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = "MAIN"
try:
opts, args = getopt.getopt(sys.argv[1:], "hfqdp::",
['help', 'forceupdate', 'quiet', 'nolaunch', 'daemon', 'pidfile=', 'port=',
'datadir=', 'config=', 'noresize']) # @UnusedVariable
except getopt.GetoptError:
sys.exit(self.help_message())
for o, a in opts:
# Prints help message
if o in ('-h', '--help'):
sys.exit(self.help_message())
# For now we'll just silence the logging
if o in ('-q', '--quiet'):
self.consoleLogging = False
# Should we update (from indexer) all shows in the DB right away?
if o in ('-f', '--forceupdate'):
self.forceUpdate = True
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if o in ('--nolaunch',):
self.noLaunch = True
# Override default/configured port
if o in ('-p', '--port'):
try:
self.forcedPort = int(a)
except ValueError:
sys.exit("Port: " + str(a) + " is not a number. Exiting.")
# Run as a double forked daemon
if o in ('-d', '--daemon'):
self.runAsDaemon = True
# When running as daemon disable consoleLogging and don't start browser
self.consoleLogging = False
self.noLaunch = True
if sys.platform == 'win32':
self.runAsDaemon = False
# Write a pidfile if requested
if o in ('--pidfile',):
self.CREATEPID = True
self.PIDFILE = str(a)
# If the pidfile already exists, sickbeard may still be running, so exit
if os.path.exists(self.PIDFILE):
sys.exit("PID file: " + self.PIDFILE + " already exists. Exiting.")
# Specify folder to load the config file from
if o in ('--config',):
sickbeard.CONFIG_FILE = os.path.abspath(a)
# Specify folder to use as the data dir
if o in ('--datadir',):
sickbeard.DATA_DIR = os.path.abspath(a)
# Prevent resizing of the banner/posters even if PIL is installed
if o in ('--noresize',):
sickbeard.NO_RESIZE = True
# The pidfile is only useful in daemon mode, make sure we can write the file properly
if self.CREATEPID:
if self.runAsDaemon:
pid_dir = os.path.dirname(self.PIDFILE)
if not os.access(pid_dir, os.F_OK):
sys.exit("PID dir: " + pid_dir + " doesn't exist. Exiting.")
if not os.access(pid_dir, os.W_OK):
sys.exit("PID dir: " + pid_dir + " must be writable (write permissions). Exiting.")
else:
if self.consoleLogging:
sys.stdout.write("Not running in daemon mode. PID file creation disabled.\n")
self.CREATEPID = False
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")
# Make sure that we can create the data dir
if not os.access(sickbeard.DATA_DIR, os.F_OK):
try:
os.makedirs(sickbeard.DATA_DIR, 0744)
except os.error, e:
raise SystemExit("Unable to create datadir '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the data dir
if not os.access(sickbeard.DATA_DIR, os.W_OK):
raise SystemExit("Datadir must be writeable '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the config file
if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
if os.path.isfile(sickbeard.CONFIG_FILE):
raise SystemExit("Config file '" + sickbeard.CONFIG_FILE + "' must be writeable.")
elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
raise SystemExit(
"Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")
os.chdir(sickbeard.DATA_DIR)
# Check if we need to perform a restore first
restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
if self.consoleLogging and os.path.exists(restoreDir):
if self.restore(restoreDir, sickbeard.DATA_DIR):
sys.stdout.write("Restore successful...\n")
else:
sys.stdout.write("Restore FAILED!\n")
# Load the config and publish it to the sickbeard package
if self.consoleLogging and not os.path.isfile(sickbeard.CONFIG_FILE):
sys.stdout.write("Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!" + "\n")
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=self.consoleLogging)
if self.runAsDaemon:
self.daemonize()
# Get PID
sickbeard.PID = os.getpid()
# Build from the DB to start with
self.loadShowsFromDB()
if self.forcedPort:
logger.log(u"Forcing web server to port " + str(self.forcedPort))
self.startPort = self.forcedPort
else:
self.startPort = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
self.log_dir = sickbeard.LOG_DIR
else:
self.log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
self.webhost = sickbeard.WEB_HOST
else:
if sickbeard.WEB_IPV6:
self.webhost = '::'
else:
self.webhost = '0.0.0.0'
# web server options
self.web_options = {
'port': int(self.startPort),
'host': self.webhost,
'data_root': os.path.join(sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': self.log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'handle_reverse_proxy': sickbeard.HANDLE_REVERSE_PROXY,
'https_cert': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_CERT),
'https_key': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_KEY),
}
# start web server
try:
self.webserver = SRWebServer(self.web_options)
self.webserver.start()
except IOError:
logger.log(u"Unable to start web server, is something else running on port %d?" % self.startPort,
logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not self.runAsDaemon:
logger.log(u"Launching browser and exiting", logger.ERROR)
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
os._exit(1)
if self.consoleLogging:
print "Starting up SickRage " + sickbeard.BRANCH + " from " + sickbeard.CONFIG_FILE
# Fire up all our threads
sickbeard.start()
# Build internal name cache
name_cache.buildNameCache()
# refresh network timezones
network_timezones.update_network_dict()
# sure, why not?
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.trimHistory()
# Start an update if we're supposed to
if self.forceUpdate or sickbeard.UPDATE_SHOWS_ON_START:
sickbeard.showUpdateScheduler.action.run(force=True) # @UndefinedVariable
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.noLaunch or self.runAsDaemon):
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
# main loop
while (True):
time.sleep(1)
def daemonize(self):
"""
Fork off as a daemon
"""
# pylint: disable=E1101
# Make a non-session-leader child process
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid() # @UndefinedVariable - only available in UNIX
# Make sure I can read my own files and shut out others
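        # os.umask(0) returns the previous mask; the next call then applies 0o77
        # (new files readable/writable by the owner only) unless the previous mask
        # was already 0, in which case "prev and ..." short-circuits and the mask stays 0.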
prev = os.umask(0)
os.umask(prev and int('077', 8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Write pid
if self.CREATEPID:
pid = str(os.getpid())
logger.log(u"Writing PID: " + pid + " to " + str(self.PIDFILE))
try:
file(self.PIDFILE, 'w').write("%s\n" % pid)
except IOError, e:
logger.log_error_and_exit(
u"Unable to write PID file: " + self.PIDFILE + " Error: " + str(e.strerror) + " [" + str(
e.errno) + "]")
# Redirect all output
sys.stdout.flush()
sys.stderr.flush()
devnull = getattr(os, 'devnull', '/dev/null')
stdin = file(devnull, 'r')
stdout = file(devnull, 'a+')
stderr = file(devnull, 'a+')
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
def remove_pid_file(self, PIDFILE):
try:
if os.path.exists(PIDFILE):
os.remove(PIDFILE)
except (IOError, OSError):
return False
return True
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
logger.log(u"Loading initial show list")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows")
sickbeard.showList = []
for sqlShow in sqlResults:
try:
curShow = TVShow(int(sqlShow["indexer"]), int(sqlShow["indexer_id"]))
curShow.nextEpisode()
sickbeard.showList.append(curShow)
except Exception, e:
logger.log(
u"There was an error creating the show in " + sqlShow["location"] + ": " + str(e).decode('utf-8'),
logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
def restore(self, srcDir, dstDir):
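        # Move every file from the restore directory into the data directory,
        # keeping a ".bak" copy of each file that gets replaced, then remove the
        # emptied restore directory. Returns False if anything goes wrong.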
try:
for file in os.listdir(srcDir):
srcFile = os.path.join(srcDir, file)
dstFile = os.path.join(dstDir, file)
bakFile = os.path.join(dstDir, file + '.bak')
shutil.move(dstFile, bakFile)
shutil.move(srcFile, dstFile)
os.rmdir(srcDir)
return True
except:
return False
def shutdown(self, type):
if sickbeard.started:
# stop all tasks
sickbeard.halt()
# save all shows to DB
sickbeard.saveAll()
# shutdown web server
if self.webserver:
logger.log("Shutting down Tornado")
self.webserver.shutDown()
try:
self.webserver.join(10)
except:
pass
# if run as daemon delete the pidfile
if self.runAsDaemon and self.CREATEPID:
self.remove_pid_file(self.PIDFILE)
if type == sickbeard.events.SystemEvent.RESTART:
install_type = sickbeard.versionCheckScheduler.action.install_type
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, sickbeard.MY_FULLNAME]
elif install_type == 'win':
if hasattr(sys, 'frozen'):
# c:\dir\to\updater.exe 12345 c:\dir\to\sickbeard.exe
popen_list = [os.path.join(sickbeard.PROG_DIR, 'updater.exe'), str(sickbeard.PID),
sys.executable]
else:
logger.log(u"Unknown SR launch method, please file a bug report about this", logger.ERROR)
popen_list = [sys.executable, os.path.join(sickbeard.PROG_DIR, 'updater.py'),
str(sickbeard.PID),
sys.executable,
sickbeard.MY_FULLNAME]
if popen_list:
popen_list += sickbeard.MY_ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.log(u"Restarting SickRage with " + str(popen_list))
subprocess.Popen(popen_list, cwd=os.getcwd())
# system exit
os._exit(0)
if __name__ == "__main__":
if sys.hexversion >= 0x020600F0:
freeze_support()
# start sickrage
SickRage().start()
| bcorbet/SickRage | SickBeard.py | Python | gpl-3.0 | 20,304 |
from . import ApolloTestCase, wa
class MetricsTest(ApolloTestCase):
def test_get_metrics(self):
metrics = wa.metrics.get_metrics()
assert 'version' in metrics
assert 'gauges' in metrics
assert 'counters' in metrics
assert 'histograms' in metrics
assert 'meters' in metrics
assert 'timers' in metrics
assert 'org.bbop.apollo.AnnotationEditorController.annotationEditorTimer' in metrics['timers']
| erasche/python-apollo | test/metrics_test.py | Python | mit | 469 |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="layout.scene.camera.up", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "camera"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/scene/camera/up/_x.py | Python | mit | 395 |
# Copyright 2012 Twitter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
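# Every route below is scoped by the owning app's slug; experiment detail pages are
# addressed by slug, while the stats/CSV/mutation endpoints use numeric experiment or
# variation ids.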
urlpatterns = patterns('ab.views',
url(r'^(?P<app_slug>[\w-]+)/experiments/$', 'experiment_list', name='ab_experiment_list'),
url(r'^(?P<app_slug>[\w-]+)/experiment/(?P<experiment_slug>[\w-]+)/$', 'experiment_detail', name='ab_experiment_detail'),
url(r'^(?P<app_slug>[\w-]+)/create-experiment/$', 'experiment_create', name='ab_experiment_create'),
url(r'^(?P<app_slug>[\w-]+)/ab/stats/(?P<experiment_id>\d+)/$', 'experiment_stats', name='ab_experiment_stats'),
url(r'^(?P<app_slug>[\w-]+)/ab/csv/(?P<experiment_id>\d+)/$', 'experiment_csv', name='ab_experiment_csv'),
url(r'^(?P<app_slug>[\w-]+)/variation/(?P<variation_id>\d+)/change-name/$', 'variation_change_name', name='ab_variation_change_name'),
url(r'^(?P<app_slug>[\w-]+)/variation/(?P<variation_id>\d+)/remove/$', 'variation_remove', name='ab_variation_remove'),
url(r'^(?P<app_slug>[\w-]+)/experiment/(?P<experiment_id>\d+)/create-variation/$', 'variation_create', name='ab_variation_create'),
url(r'^(?P<app_slug>[\w-]+)/experiment/(?P<experiment_id>\d+)/save-data/$', 'experiment_save_data', name='ab_experiment_save_data'),
url(r'^(?P<app_slug>[\w-]+)/experiment/(?P<experiment_id>\d+)/delete/$', 'experiment_delete', name='ab_experiment_delete'),
url(r'^(?P<app_slug>[\w-]+)/ab-quickstart/$', 'quickstart', name='ab_quickstart'),
) | clutchio/clutch | ab/urls.py | Python | apache-2.0 | 1,965 |
"""
Steam OpenId backend, docs at:
http://psa.matiasaguirre.net/docs/backends/steam.html
"""
from social.backends.open_id import OpenIdAuth
from social.exceptions import AuthFailed
USER_INFO = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
class SteamOpenId(OpenIdAuth):
name = 'steam'
URL = 'https://steamcommunity.com/openid'
def get_user_id(self, details, response):
"""Return user unique id provided by service"""
return self._user_id(response)
def get_user_details(self, response):
player = self.get_json(USER_INFO, params={
'key': self.setting('API_KEY'),
'steamids': self._user_id(response)
})
if len(player['response']['players']) > 0:
player = player['response']['players'][0]
details = {'username': player.get('personaname'),
'email': '',
'fullname': '',
'first_name': '',
'last_name': '',
'player': player}
else:
details = {}
return details
def _user_id(self, response):
user_id = response.identity_url.rsplit('/', 1)[-1]
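        # The OpenID identity URL ends in the numeric SteamID64, e.g.
        # "https://steamcommunity.com/openid/id/76561197960287930" -> "76561197960287930".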
if not user_id.isdigit():
raise AuthFailed(self, 'Missing Steam Id')
return user_id
| HackerEcology/SuggestU | suggestu/social/backends/steam.py | Python | gpl-3.0 | 1,336 |
"""Check the status of configured URLs."""
from status_base import mongo_collection
from status_base import mongo_database
from status_base import save
from status_base import schedule_log
from status_base import setup_environment
from status_base import get_parameters
from urllib.request import urlopen
setup_environment()
def status():
    """Check each configured URL and record its status."""
schedule_log("Starting URL checker")
status = True
output = ''
data = {
'results': []
}
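    # Each checked URL appends an entry of the form
    # {'url': <url>, 'status': <HTTP status code or exception text>} to data['results'].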
urls = get_parameters()
schedule_log('Got %s URLs' % len(urls))
schedule_log('%s' % urls)
for url in urls:
schedule_log('Checking: %s' % url)
try:
get_code = urlopen(url).getcode()
schedule_log('Got code: %s' % get_code)
data['results'].append({
'url': url,
'status': get_code
})
if get_code != 200:
status = False
except Exception as ex:
status = False
schedule_log('Exception: %s' % ex)
data['results'].append({
'url': url,
'status': '%s' % ex
})
save(status, data, mongo_database(), mongo_collection(), output)
schedule_log("Finished")
| CornerstoneLabs/service-dashboard | dashboard/fabric-plugins/status_url_check.py | Python | mit | 1,252 |
import logging
from coapthon import defines
from coapthon.messages.request import Request
from coapthon.messages.response import Response
logger = logging.getLogger(__name__)
__author__ = 'Giacomo Tanganelli'
class BlockItem(object):
def __init__(self, byte, num, m, size, payload=None, content_type=None):
"""
Data structure to store Block parameters
:param byte: the last byte exchanged
:param num: the num field of the block option
:param m: the M bit of the block option
:param size: the size field of the block option
:param payload: the overall payload received in all blocks
:param content_type: the content-type of the payload
"""
self.byte = byte
self.num = num
self.m = m
self.size = size
self.payload = payload
self.content_type = content_type
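        # num/m/size mirror a CoAP Block option: e.g. num=2, m=1, size=512 means
        # "third 512-byte block, more blocks follow"; payload accumulates the body
        # reassembled from all blocks received so far.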
class BlockLayer(object):
"""
    Handles the Blockwise options, hiding the block-by-block exchange from both servers and clients.
"""
def __init__(self):
self._block1_sent = {}
self._block2_sent = {}
self._block1_receive = {}
self._block2_receive = {}
def receive_request(self, transaction):
"""
        Handles the Block options in an incoming request.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction
"""
if transaction.request.block2 is not None:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
num, m, size = transaction.request.block2
if key_token in self._block2_receive:
self._block2_receive[key_token].num = num
self._block2_receive[key_token].size = size
self._block2_receive[key_token].m = m
del transaction.request.block2
else:
# early negotiation
byte = 0
self._block2_receive[key_token] = BlockItem(byte, num, m, size)
del transaction.request.block2
elif transaction.request.block1 is not None:
# POST or PUT
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
num, m, size = transaction.request.block1
if transaction.request.size1 is not None:
                # What to do if the size1 is larger than the maximum resource size or the maximum server buffer
pass
if key_token in self._block1_receive:
# n-th block
content_type = transaction.request.content_type
if num != self._block1_receive[key_token].num \
or content_type != self._block1_receive[key_token].content_type:
# Error Incomplete
return self.incomplete(transaction)
self._block1_receive[key_token].payload += transaction.request.payload
else:
# first block
if num != 0:
# Error Incomplete
return self.incomplete(transaction)
content_type = transaction.request.content_type
self._block1_receive[key_token] = BlockItem(size, num, m, size, transaction.request.payload,
content_type)
if m == 0:
transaction.request.payload = self._block1_receive[key_token].payload
# end of blockwise
del transaction.request.block1
transaction.block_transfer = False
del self._block1_receive[key_token]
return transaction
else:
# Continue
transaction.block_transfer = True
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
transaction.response.code = defines.Codes.CONTINUE.number
transaction.response.block1 = (num, m, size)
num += 1
byte = size
self._block1_receive[key_token].byte = byte
self._block1_receive[key_token].num = num
self._block1_receive[key_token].size = size
self._block1_receive[key_token].m = m
return transaction
def receive_response(self, transaction):
"""
        Handles the Block options in an incoming response.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
host, port = transaction.response.source
key_token = hash(str(host) + str(port) + str(transaction.response.token))
if key_token in self._block1_sent and transaction.response.block1 is not None:
item = self._block1_sent[key_token]
transaction.block_transfer = True
if item.m == 0:
transaction.block_transfer = False
del transaction.request.block1
return transaction
n_num, n_m, n_size = transaction.response.block1
if n_num != item.num: # pragma: no cover
logger.warning("Blockwise num acknowledged error, expected " + str(item.num) + " received " +
str(n_num))
return None
if n_size < item.size:
logger.debug("Scale down size, was " + str(item.size) + " become " + str(n_size))
item.size = n_size
request = transaction.request
del request.mid
del request.block1
request.payload = item.payload[item.byte: item.byte+item.size]
item.num += 1
item.byte += item.size
if len(item.payload) <= item.byte:
m = 0
else:
m = 1
request.block1 = (item.num, m, item.size)
# The original request already has this option set
# request.size1 = len(item.payload)
elif transaction.response.block2 is not None:
num, m, size = transaction.response.block2
if m == 1:
transaction.block_transfer = True
if key_token in self._block2_sent:
item = self._block2_sent[key_token]
if num != item.num: # pragma: no cover
logger.error("Receive unwanted block")
return self.error(transaction, defines.Codes.REQUEST_ENTITY_INCOMPLETE.number)
if item.content_type is None:
item.content_type = transaction.response.content_type
if item.content_type != transaction.response.content_type: # pragma: no cover
logger.error("Content-type Error")
return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
item.byte += size
item.num = num + 1
item.size = size
item.m = m
item.payload += transaction.response.payload
else:
item = BlockItem(size, num + 1, m, size, transaction.response.payload,
transaction.response.content_type)
self._block2_sent[key_token] = item
request = transaction.request
del request.mid
del request.block2
request.block2 = (item.num, 0, item.size)
else:
transaction.block_transfer = False
if key_token in self._block2_sent:
if self._block2_sent[key_token].content_type != transaction.response.content_type: # pragma: no cover
logger.error("Content-type Error")
return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
transaction.response.payload = self._block2_sent[key_token].payload + transaction.response.payload
del self._block2_sent[key_token]
else:
transaction.block_transfer = False
return transaction
def receive_empty(self, empty, transaction):
"""
        Dummy function. Used so as not to break the layered architecture.
:type empty: Message
:param empty: the received empty message
:type transaction: Transaction
:param transaction: the transaction that owns the empty message
:rtype : Transaction
:return: the transaction
"""
return transaction
def send_response(self, transaction):
"""
        Handles the Block options in an outgoing response.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
if (key_token in self._block2_receive and transaction.response.payload is not None) or \
(transaction.response.payload is not None and len(transaction.response.payload) > defines.MAX_PAYLOAD):
if key_token in self._block2_receive:
byte = self._block2_receive[key_token].byte
size = self._block2_receive[key_token].size
num = self._block2_receive[key_token].num
else:
byte = 0
num = 0
size = defines.MAX_PAYLOAD
m = 1
self._block2_receive[key_token] = BlockItem(byte, num, m, size)
if len(transaction.response.payload) > (byte + size):
m = 1
else:
m = 0
if (transaction.request.size2 is not None and transaction.request.size2 == 0) or \
(transaction.response.payload is not None and len(transaction.response.payload) > defines.MAX_PAYLOAD):
transaction.response.size2 = len(transaction.response.payload)
transaction.response.payload = transaction.response.payload[byte:byte + size]
del transaction.response.block2
transaction.response.block2 = (num, m, size)
self._block2_receive[key_token].byte += size
self._block2_receive[key_token].num += 1
if m == 0:
del self._block2_receive[key_token]
return transaction
def send_request(self, request):
"""
        Handles the Block options in an outgoing request.
:type request: Request
:param request: the outgoing request
:return: the edited request
"""
assert isinstance(request, Request)
if request.block1 or (request.payload is not None and len(request.payload) > defines.MAX_PAYLOAD):
host, port = request.destination
key_token = hash(str(host) + str(port) + str(request.token))
if request.block1:
num, m, size = request.block1
else:
num = 0
m = 1
size = defines.MAX_PAYLOAD
request.size1 = len(request.payload)
self._block1_sent[key_token] = BlockItem(size, num, m, size, request.payload, request.content_type)
request.payload = request.payload[0:size]
del request.block1
request.block1 = (num, m, size)
elif request.block2:
host, port = request.destination
key_token = hash(str(host) + str(port) + str(request.token))
num, m, size = request.block2
item = BlockItem(size, num, m, size, "", None)
self._block2_sent[key_token] = item
return request
return request
@staticmethod
def incomplete(transaction):
"""
Notifies incomplete blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
transaction.block_transfer = True
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
transaction.response.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
return transaction
@staticmethod
def error(transaction, code): # pragma: no cover
"""
Notifies generic error on blockwise exchange.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:rtype : Transaction
:return: the edited transaction
"""
transaction.block_transfer = True
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.type = defines.Types["RST"]
transaction.response.token = transaction.request.token
transaction.response.code = code
return transaction
| mcfreis/CoAPthon | coapthon/layers/blocklayer.py | Python | mit | 13,495 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the 'renommer' parameter of the 'orbe' command."""
from primaires.format.fonctions import supprimer_accents
from primaires.interpreteur.masque.parametre import Parametre
class PrmRenommer(Parametre):
    """Command 'orbe renommer'."""
    def __init__(self):
        """Parameter constructor."""
Parametre.__init__(self, "renommer", "rename")
self.tronquer = True
self.schema = "<nom_objet> comme/as <message>"
self.aide_courte = "renomme un orbe"
self.aide_longue = \
"Cette commande permet de renommer un orbe, c'est-à-dire " \
"changer le nom grâce auquel un autre orbe pourra le " \
"contacter directement. En d'autre terme, un orbe sans " \
"nom (ce qui est le cas par défaut) ne pourra pas recevoir " \
"de message privé. Pour attribuer un nom à un orbe, ou " \
"changer ce nom, précisez un fragment du nom de l'orbe " \
"suivi du mot-clé |ent|comme|ff| (ou |ent|as|ff| en " \
"anglais) puis du nouveau nom. Vous pouvez choisir le " \
"nom que vous voulez, tant qu'il n'est pas déjà utilisé. " \
"Gardez à l'esprit qu'il doit être court et que vous " \
"aurez à le communiquer RP aux joueurs par lesquels vous " \
"acceptez d'être contacté en RP. Changer le nom de l'orbe " \
"après coup revient un peu à changer les serrures d'une " \
"porte : vos anciens correspondants ne sauront pas comment " \
"vous joindre. Aussi, il peut être utile de choisir un " \
"nom court, logique et orienté RP que vous puissiez " \
"transmettre une fois pour toute et ne changerez plus."
    def ajouter(self):
        """Method called when the command is added to the interpreter."""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
nom_objet.proprietes["quantite"] = "True"
nom_objet.proprietes["conteneur"] = "True"
    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter."""
objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)
objets = [c[0] for c in objets]
message = dic_masques["message"].message
orbe = objets[0]
if not orbe.est_de_type("orbe"):
personnage << "|err|{} n'est pas un orbe.|ff|".format(
                    orbe.get_nom().capitalize())
return
nom = supprimer_accents(message).lower()
if not supprimer_accents(nom).isalpha():
personnage << "|err|Le nom {} est invalide.|ff|".format(nom)
return
orbes = importeur.objet.get_objets_de_type("orbe")
noms = [o.nom_orbe for o in orbes]
if nom in noms:
personnage << "|err|Ce nom d'orbe est déjà utilisé.|ff|"
return
orbe.nom_orbe = nom
personnage << "{} est à présent nommé {}.".format(orbe.nom_singulier,
nom)
| stormi/tsunami | src/primaires/communication/commandes/orbe/renommer.py | Python | bsd-3-clause | 4,745 |
#!/usr/bin/python3
#
__author__ = 'Chris Burton'
doc = """
Usage:
fwparse.py [-o FILE]
fwparse.py [-s FILE]
fwparse.py [(-q -o FILE)]
fwparse.py [-d]
fwparse.py [-h]
Options:
-o FILE Set an output file
-s FILE Reference a settings file outside this directory
-q Quietly, must use -o
-d Displays Debug Output
-h This Screen
"""
import json
import func
import os.path
import requests
import yaml
from collections import OrderedDict
from json import JSONDecoder
from docopt import docopt
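# Example invocations (file names below are illustrative only):
#   ./fwparse.py -o rules.psv             # write the pipe-delimited listing to a file
#   ./fwparse.py -q -o rules.psv          # same, without echoing progress to the console
#   ./fwparse.py -s /path/to/settings.yml # use an alternate settings file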
if __name__ == '__main__':
arguments = docopt(doc, version='0.1')
func.debugoutput(arguments['-d'], arguments)
# Take actions based on command line arguments
if arguments['-s'] != None:
if os.path.exists(arguments['-s']) == True:
func.outputting(arguments['-q'], arguments['-o'], "Using Command Line Specified Settings File")
settings = yaml.load(open(arguments['-s'], 'r'))
else:
func.outputting(arguments['-q'], arguments['-o'], arguments['-s'] + " does not exist.\n")
quit()
else:
settings = yaml.load(open("settings.yml", 'r'))
# Validate Quiet Enablement
if arguments['-q'] != None and arguments['-q'] == True :
if arguments['-o'] == None:
print('-o must be specified if using -q')
quit()
# Set up the URL
url = 'https://' + settings['host'] + '/v1/'
# Get the AuthHeader and make sure we can access the API
authHeader = func.authenticate(arguments, settings)
# Make the API Call and retrieve the JSON
PolicyData = func.apiCall(url + 'firewall_policies', 'get', authHeader, arguments, settings)
func.debugoutput(arguments['-d'], str(PolicyData))
# Check to see if there is anything worth seeing.
if PolicyData['count'] == 0:
func.outputting(arguments['-q'], arguments['-o'], 'No Policies Available.')
quit()
ZoneData = func.apiCall(url + 'firewall_zones', 'get', authHeader, arguments, settings)
# Print a Header
MyLine = 'Source Name|Source IP(s)|Destination Zone|Service Name|Port|Protocol|Active|Direction|Action|Description'
func.outputting(arguments['-q'], arguments['-o'], MyLine)
# Cycle through the zones to find who uses them and what they are configured for.
for s in ZoneData['firewall_zones']:
# Clear out from previous loop
policyID = ''
# Set the Description for this zone and deal with the cruft inside this string
description = str(s['description']).replace('|',' ').replace('\r', ' ').replace('\n', ' ')
# Look to see if this zone has a policy associated with it.
try:
policyID = str(s['used_by'][0]['id'])
except IndexError:
pass
# There isn't a policy on this zone, then we aren't going to worry about it.
if policyID != '':
# Make an API Call to see what the rules are for this policy
RuleData = func.apiCall(url + 'firewall_policies/' + policyID + '/firewall_rules', 'get', authHeader, arguments, settings)
# Cycle through the rules for this policy
for rule in RuleData['firewall_rules']:
ruleID = rule['id']
# Deal with the ones that may or may not be in there.
try:
destinationname = rule['firewall_source']['name']
servicename = rule['firewall_service']['name']
serviceport = rule['firewall_service']['port']
protocol = rule['firewall_service']['protocol']
except KeyError:
pass
except TypeError:
pass
# Construct the line to be written
NewLine = s['name'] + '|' + s['ip_address'] + '|' + \
destinationname + '|' + \
str(servicename) + '|' + \
str(serviceport) + '|' + \
str(protocol) + '|' + \
str(rule['active']) + '|' + \
rule['chain'] + '|' + \
rule['action'] + '|' + \
description
# Don't reprint if one of the other values makes the rule different
if NewLine != MyLine:
func.outputting(arguments['-q'], arguments['-o'], NewLine)
MyLine = NewLine
| cyberhiker/Halo-Scripts | cp-fwparse/fwparse.py | Python | mit | 4,204 |
"""
Tool: tweets per (day, hour, minute) figure, shown and saved optionally
"""
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
from pymongo import MongoClient
from datetime import datetime, timedelta
## COMMANDLINE ################################################################
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", default="smapp_readOnly")
parser.add_argument("-w", "--password", required=True)
args = parser.parse_args()
## CONFIG #####################################################################
start = datetime(2010,1,1) # Time in UTC
step_size = timedelta(days=10) # Time step to observe (timedelta(hours=1))
num_steps = 200 # Number of steps to plot
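# With these values the figure spans 200 * 10 days = 2000 days, i.e. roughly
# 2010-01-01 through mid-2015.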
client = MongoClient("smapp-politics", 27011) # Dataserver host, port
database = client["USLegislator"] # Database
collection = database["tweets"] # Tweet collection
plot_title = "USLEG: Tweets per 10-day"
x_label = "Time"
y_label = "Tweets"
transparency = 0.70 # Bar transparency
bar_width = 0.8 # Bar width
x_label_step = 5 # How often to show an x-label
## MAIN #######################################################################
# Auth to DB
if not database.authenticate(args.user, args.password):
raise Exception("DB authentication failed")
times = [start + (i * step_size) for i in range(num_steps)]
counts = []
for step in times:
tweets = collection.find({"timestamp": {"$gte": step, "$lt": step + step_size}})
counts.append(tweets.count())
sns.set_style("darkgrid")
sns.set_palette("husl")
bars = plt.bar(range(num_steps),
counts,
width=bar_width,
linewidth=0.0,
alpha=transparency,
align="edge")
plt.xlim(0, num_steps)
plt.tick_params(axis="x", which="both", bottom="on", top="off", length=8, width=1, color="#999999")
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(plot_title)
if step_size.total_seconds() < 60*60:
plt.xticks(range(num_steps)[::x_label_step],
["{0}:{1}".format(t.hour, t.minute) for t in times[::x_label_step]],
rotation=90)
elif step_size.total_seconds() < 60*60*24:
plt.xticks(range(num_steps)[::x_label_step],
["{0}-{1} {2}:{3}".format(t.month, t.day, t.hour, t.minute) for t in times[::x_label_step]],
rotation=90)
else:
plt.xticks(range(num_steps)[::x_label_step],
["{0}-{1}-{2}".format(t.year, t.month, t.day) for t in times[::x_label_step]],
rotation=90)
plt.tight_layout()
plt.show()
| SMAPPNYU/smappPy | smappPy/tools/figure_makers/tweets_per_timestep.py | Python | gpl-2.0 | 2,624 |
# -*- encoding: utf-8 -*-
#
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.oneview import common
from ironic.drivers.modules.oneview import management
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
@mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True)
class OneViewManagementDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(OneViewManagementDriverTestCase, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
mgr_utils.mock_the_extension_manager(driver="fake_oneview")
self.driver = driver_factory.get_driver("fake_oneview")
self.node = obj_utils.create_test_node(
self.context, driver='fake_oneview',
properties=db_utils.get_test_oneview_properties(),
driver_info=db_utils.get_test_oneview_driver_info(),
)
self.info = common.get_oneview_info(self.node)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
def test_validate(self, mock_validate, mock_get_ov_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.management.validate(task)
self.assertTrue(mock_validate.called)
def test_validate_fail(self, mock_get_ov_client):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
id=999,
driver='fake_oneview')
with task_manager.acquire(self.context, node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.management.validate, task)
@mock.patch.object(common, 'validate_oneview_resources_compatibility',
                       spec_set=True, autospec=True)
def test_validate_fail_exception(self, mock_validate, mock_get_ov_client):
mock_validate.side_effect = exception.OneViewError('message')
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_get_properties(self, mock_get_ov_client):
expected = common.COMMON_PROPERTIES
self.assertItemsEqual(expected,
self.driver.management.get_properties())
def test_set_boot_device(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.management.set_boot_device(task, boot_devices.PXE)
oneview_client.set_boot_device.assert_called_once_with(
self.info,
management.BOOT_DEVICE_MAPPING_TO_OV.get(boot_devices.PXE)
)
def test_set_boot_device_invalid_device(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device,
task, 'fake-device')
self.assertFalse(oneview_client.set_boot_device.called)
def test_set_boot_device_fail_to_get_server_profile(self,
mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_server_profile_from_hardware.side_effect = \
oneview_exceptions.OneViewException()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.OneViewError,
self.driver.management.set_boot_device,
task, 'disk')
self.assertFalse(oneview_client.set_boot_device.called)
def test_set_boot_device_without_server_profile(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_server_profile_from_hardware.return_value = False
with task_manager.acquire(self.context, self.node.uuid) as task:
expected_msg = (
'A Server Profile is not associated with node %s.'
% self.node.uuid
)
self.assertRaisesRegexp(
exception.OperationNotPermitted,
expected_msg,
self.driver.management.set_boot_device,
task,
'disk'
)
def test_get_supported_boot_devices(self, mock_get_ov_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM]
self.assertItemsEqual(
expected,
task.driver.management.get_supported_boot_devices(task),
)
def test_get_boot_device(self, mock_get_ov_client):
device_mapping = management.BOOT_DEVICE_MAPPING_TO_OV
oneview_client = mock_get_ov_client()
with task_manager.acquire(self.context, self.node.uuid) as task:
# For each known device on OneView, Ironic should return its
# counterpart value
for device_ironic, device_ov in device_mapping.items():
oneview_client.get_boot_order.return_value = [device_ov]
expected_response = {
'boot_device': device_ironic,
'persistent': True
}
response = self.driver.management.get_boot_device(task)
self.assertEqual(expected_response, response)
oneview_client.get_boot_order.assert_called_with(self.info)
def test_get_boot_device_fail(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_boot_order.side_effect = \
oneview_exceptions.OneViewException()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.OneViewError,
self.driver.management.get_boot_device,
task)
oneview_client.get_boot_order.assert_called_with(self.info)
def test_get_boot_device_unknown_device(self, mock_get_ov_client):
oneview_client = mock_get_ov_client()
oneview_client.get_boot_order.return_value = ["spam",
"bacon"]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.InvalidParameterValue,
task.driver.management.get_boot_device,
task
)
def test_get_sensors_data_not_implemented(self, mock_get_ov_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
NotImplementedError,
task.driver.management.get_sensors_data,
task
)
| devananda/ironic | ironic/tests/unit/drivers/modules/oneview/test_management.py | Python | apache-2.0 | 8,420 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import argparse
import sys
import os
import pwd
import json
from urlparse import urlparse
from snakebite.client import HAClient
from snakebite.errors import FileNotFoundException
from snakebite.errors import DirectoryException
from snakebite.errors import FileException
from snakebite.errors import RequestError
from snakebite.formatter import format_listing
from snakebite.formatter import format_results
from snakebite.formatter import format_counts
from snakebite.formatter import format_fs_stats
from snakebite.formatter import format_stat
from snakebite.formatter import format_du
from snakebite.config import HDFSConfig
from snakebite.version import version
from snakebite.namenode import Namenode
def print_error_exit(msg, fd=sys.stderr):
print >> fd, "Error: %s" % msg
sys.exit(-1)
def print_info(msg, fd=sys.stderr):
print >> fd, "Info: %s" % msg
def exitError(exc_info):
exc_type, exc_value, exc_traceback = exc_info
if isinstance(
exc_value, (FileNotFoundException, DirectoryException, FileException),
):
print str(exc_value)
elif isinstance(exc_value, RequestError):
print "Request error: %s" % str(exc_value)
else:
raise exc_type, exc_value, exc_traceback
sys.exit(-1)
def command(args="", descr="", allowed_opts="", visible=True):
def wrap(f):
Commands.methods[f.func_name] = {"method": f,
"args": args,
"descr": descr,
"allowed_opts": allowed_opts,
"visible": visible}
return wrap
class Commands(object):
methods = {}
class ArgumentParserError(Exception):
def __init__(self, message, error_message, prog, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.error_message = error_message
self.prog = prog
class Parser(argparse.ArgumentParser):
def print_help(self):
print ''.join([self.usage, self.epilog])
def error(self, message): # Override error message to show custom help.
raise ArgumentParserError("SystemExit", message, self.prog)
class CommandLineParser(object):
GENERIC_OPTS = {'D': {"short": '-D',
"long": '--debug',
"help": 'Show debug information',
"action": 'store_true'},
'j': {"short": '-j',
"long": '--json',
"help": 'JSON output',
"action": 'store_true'},
'n': {"short": '-n',
"long": '--namenode',
"help": 'namenode host',
"type": str},
'V': {"short": '-V',
"long": '--version',
"help": 'Hadoop protocol version (default:%d)' % Namenode.DEFAULT_VERSION,
"default": Namenode.DEFAULT_VERSION,
"type": float},
'p': {"short": '-p',
"long": '--port',
"help": 'namenode RPC port (default: %d)' % Namenode.DEFAULT_PORT,
"type": int},
'h': {"short": '-h',
"long": '--help',
"help": 'show help',
"type": int},
'v': {"short": '-v',
"long": '--ver',
"help": 'Display snakebite version',
"type": int}
}
SUB_OPTS = {'R': {"short": '-R',
"long": '--recurse',
"help": 'recurse into subdirectories',
"action": 'store_true'},
'd': {"short": '-d',
"long": '--directory',
"help": 'show only the path and no children / check if path is a dir',
"action": 'store_true'},
's': {"short": '-s',
"long": '--summary',
"help": 'print summarized output',
"action": 'store_true'},
'S': {"short": '-S',
"long": '--skiptrash',
"help": 'skip the trash (when trash is enabled)',
"default": False,
"action": 'store_true'},
'T': {"short": '-T',
"long": "--usetrash",
"help": "enable the trash",
"action": 'store_true'},
'z': {"short": '-z',
"long": '--zero',
"help": 'check for zero length',
"action": 'store_true'},
'e': {"short": '-e',
"long": '--exists',
"help": 'check if file exists',
"action": 'store_true'},
'checkcrc': {"short": '-checkcrc',
"long": "--checkcrc",
"help": 'check Crc',
"action": 'store_true'},
'f': {"short": '-f',
"long": "--append",
"help": 'show appended data as the file grows',
"action": 'store_true'},
'nl': {"short": '-nl',
"long": "--newline",
"help": 'add a newline character at the end of each file.',
"action": 'store_true'},
'h': {"short": '-h',
"long": '--human',
"help": 'human readable output',
"action": 'store_true'}
}
def __init__(self):
usage = "snakebite [general options] cmd [arguments]"
epilog = "\ngeneral options:\n"
epilog += "\n".join(sorted([" %-30s %s" % ("%s %s" % (v['short'], v['long']), v['help']) for k, v in self.GENERIC_OPTS.iteritems()]))
epilog += "\n\ncommands:\n"
epilog += "\n".join(sorted([" %-30s %s" % ("%s %s" % (k, v['args']), v['descr']) for k, v in Commands.methods.iteritems() if v['visible']]))
epilog += "\n\nto see command-specific options use: snakebite [cmd] --help"
self.parser = Parser(usage=usage, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter, add_help=False)
self._build_parent_parser()
self._add_subparsers()
self.namenodes = []
def _build_parent_parser(self):
#general options
for opt_name, opt_data in self.GENERIC_OPTS.iteritems():
if 'action' in opt_data:
self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], action=opt_data['action'])
else:
if 'default' in opt_data:
self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], type=opt_data['type'], default=opt_data['default'])
else:
self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], type=opt_data['type'])
def _add_subparsers(self):
default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
#sub-options
arg_parsers = {}
for opt_name, opt_data in self.SUB_OPTS.iteritems():
arg_parsers[opt_name] = argparse.ArgumentParser(add_help=False)
arg_parsers[opt_name].add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'],
action=opt_data['action'])
subcommand_help_parser = argparse.ArgumentParser(add_help=False)
subcommand_help_parser.add_argument('-H', '--help', action='store_true')
# NOTE: args and dirs are logically equivalent except for default val.
# Difference in naming gives more valuable error/help output.
# 0 or more dirs
positional_arg_parsers = {}
positional_arg_parsers['[dirs]'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['[dirs]'].add_argument('dir', nargs='*', default=[default_dir], help="[dirs]")
# 1 or more dirs
positional_arg_parsers['dir [dirs]'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['dir [dirs]'].add_argument('dir', nargs='+', default=[default_dir], help="dir [dirs]")
# 2 dirs
positional_arg_parsers['src dst'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['src dst'].add_argument('src_dst', nargs=2, default=[default_dir], help="src dst")
# 1 or more args
positional_arg_parsers['[args]'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['[args]'].add_argument('arg', nargs='*', help="[args]")
# 1 arg
positional_arg_parsers['arg'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['arg'].add_argument('single_arg', default=default_dir, help="arg")
# 1 (integer) arg
positional_arg_parsers['(int) arg'] = argparse.ArgumentParser(add_help=False)
positional_arg_parsers['(int) arg'].add_argument('single_int_arg', default='0', help="(integer) arg",
type=int)
subparsers = self.parser.add_subparsers()
for cmd_name, cmd_info in Commands.methods.iteritems():
parents = [arg_parsers[opt] for opt in cmd_info['allowed_opts'] if opt in arg_parsers]
parents += [subcommand_help_parser]
if 'req_args' in cmd_info and not cmd_info['req_args'] is None:
parents += [positional_arg_parsers[arg] for arg in cmd_info['req_args']]
command_parser = subparsers.add_parser(cmd_name, add_help=False, parents=parents)
command_parser.set_defaults(command=cmd_name)
def init(self):
self.read_config()
self._clean_args()
self.setup_client()
def _clean_args(self):
for path in self.__get_all_directories():
if path.startswith('hdfs://'):
parse_result = urlparse(path)
if path in self.args.dir:
self.args.dir.remove(path)
self.args.dir.append(parse_result.path)
else:
self.args.single_arg = parse_result.path
def __usetrash_unset(self):
return not 'usetrash' in self.args or self.args.usetrash == False
def __use_cl_port_first(self, alt):
# Port provided from CL has the highest priority:
return self.args.port if self.args.port else alt
def read_config(self):
# Try to retrieve namenode config from within CL arguments
if self._read_config_cl():
return
config_file = os.path.join(os.path.expanduser('~'), '.snakebiterc')
if os.path.exists(config_file):
#if ~/.snakebiterc exists - read config from it
self._read_config_snakebiterc()
elif os.path.exists('/etc/snakebiterc'):
self._read_config_snakebiterc('/etc/snakebiterc')
else:
# Try to read the configuration for HDFS configuration files
configs = HDFSConfig.get_external_config()
# if configs exist and contain something
if configs:
for config in configs:
nn = Namenode(config['namenode'],
self.__use_cl_port_first(config['port']))
self.namenodes.append(nn)
if self.__usetrash_unset():
self.args.usetrash = HDFSConfig.use_trash
if len(self.namenodes):
return
else:
print "No ~/.snakebiterc found, no HADOOP_HOME set and no -n and -p provided"
print "Tried to find core-site.xml in:"
for core_conf_path in HDFSConfig.core_try_paths:
print " - %s" % core_conf_path
print "Tried to find hdfs-site.xml in:"
for hdfs_conf_path in HDFSConfig.hdfs_try_paths:
print " - %s" % hdfs_conf_path
print "\nYou can manually create ~/.snakebiterc with the following content:"
print '{'
print ' "config_version": 2,'
print ' "use_trash": true,'
print ' "namenodes": ['
print ' {"host": "namenode-ha1", "port": %d, "version": %d},' % (Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
print ' {"host": "namenode-ha2", "port": %d, "version": %d}' % (Namenode.DEFAULT_PORT, Namenode.DEFAULT_VERSION)
print ' ]'
print '}'
sys.exit(1)
def _read_config_snakebiterc(self, path = os.path.join(os.path.expanduser('~'), '.snakebiterc')):
        old_version_info = "You are using snakebite %s with Trash support together with an old snakebiterc, please update/remove your %s file. By default Trash is %s." % (version(), path, 'disabled' if not HDFSConfig.use_trash else 'enabled')
with open(path) as config_file:
configs = json.load(config_file)
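            # A version-1 file is a JSON list of namenodes; a version-2 file is a JSON
            # object carrying "config_version", optional "use_trash" and a "namenodes"
            # list; a bare JSON object without "config_version" describes a single namenode.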
if isinstance(configs, list):
# Version 1: List of namenodes
# config is a list of namenode(s) - possibly HA
for config in configs:
nn = Namenode(config['namenode'],
self.__use_cl_port_first(config.get('port', Namenode.DEFAULT_PORT)),
config.get('version', Namenode.DEFAULT_VERSION))
self.namenodes.append(nn)
if self.__usetrash_unset():
# commandline setting has higher priority
print_info(old_version_info)
# There's no info about Trash in version 1, use default policy:
self.args.usetrash = HDFSConfig.use_trash
elif isinstance(configs, dict):
# Version 2: {}
# Can be either new configuration or just one namenode
# which was the very first configuration syntax
if 'config_version' in configs:
# Config version => 2
for nn_config in configs['namenodes']:
nn = Namenode(nn_config['host'],
self.__use_cl_port_first(nn_config.get('port', Namenode.DEFAULT_PORT)),
nn_config.get('version', Namenode.DEFAULT_VERSION))
self.namenodes.append(nn)
if self.__usetrash_unset():
# commandline setting has higher priority
self.args.usetrash = configs.get("use_trash", HDFSConfig.use_trash)
else:
# config is a single namenode - no HA
self.namenodes.append(Namenode(configs['namenode'],
self.__use_cl_port_first(configs.get('port', Namenode.DEFAULT_PORT)),
configs.get('version', Namenode.DEFAULT_VERSION)))
if self.__usetrash_unset():
# commandline setting has higher priority
print_info(old_version_info)
self.args.usetrash = HDFSConfig.use_trash
else:
print_error_exit("Config retrieved from %s is corrupted! Remove it!" % path)
def __get_all_directories(self):
if self.args and 'dir' in self.args:
dirs_to_check = list(self.args.dir)
if self.args.command == 'mv':
dirs_to_check.append(self.args.single_arg)
return dirs_to_check
else:
return ()
def _read_config_cl(self):
''' Check if any directory arguments contain hdfs://'''
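        # e.g. a path like "hdfs://namenode.example.com:8020/user/foo" (hypothetical host)
        # contributes its hostname and port; all hdfs:// arguments and any -n/-p options
        # must agree on a single namenode host and port.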
dirs_to_check = self.__get_all_directories()
hosts, ports = [], []
for path in dirs_to_check:
if path.startswith('hdfs://'):
parse_result = urlparse(path)
hosts.append(parse_result.hostname)
ports.append(parse_result.port)
# remove duplicates and None from (hosts + self.args.namenode)
hosts = filter(lambda x: x != None, set(hosts + [self.args.namenode]))
if len(hosts) > 1:
            print_error_exit('Conflicting namenode hosts in commandline arguments, hosts: %s' % str(hosts))
ports = filter(lambda x: x != None, set(ports + [self.args.port]))
if len(ports) > 1:
print_error_exit('Conflicting namenode ports in commandline arguments, ports: %s' % str(ports))
# Store port from CL in arguments - CL port has the highest priority
if len(ports) == 1:
self.args.port = ports[0]
# do we agree on one namenode?
if len(hosts) == 1 and len(ports) <= 1:
self.args.namenode = hosts[0]
self.args.port = ports[0] if len(ports) == 1 else Namenode.DEFAULT_PORT
self.namenodes.append(Namenode(self.args.namenode, self.args.port))
# we got the info from CL -> check if use_trash is set - if not use default policy:
if self.__usetrash_unset():
self.args.usetrash = HDFSConfig.use_trash
return True
else:
return False
def parse(self, non_cli_input=None): # Allow input for testing purposes
if not sys.argv[1:] and not non_cli_input:
self.parser.print_help()
sys.exit(-1)
try:
args = self.parser.parse_args(non_cli_input)
except ArgumentParserError, error:
if "-h" in sys.argv or "--help" in sys.argv: # non cli input?
commands = [cmd for (cmd, description) in Commands.methods.iteritems() if description['visible'] is True]
command = error.prog.split()[-1]
if command in commands:
self.usage_helper(command)
else:
self.parser.print_help()
self.parser.exit(2)
elif "-v" in sys.argv or "--ver" in sys.argv:
print version()
self.parser.exit(0)
else:
self.parser.print_usage(sys.stderr)
self.parser.exit(2, 'error: %s. Use -h for help.\n' % (error.error_message))
self.cmd = args.command
self.args = args
return self.args
def setup_client(self):
if 'skiptrash' in self.args:
use_trash = self.args.usetrash and not self.args.skiptrash
else:
use_trash = self.args.usetrash
self.client = HAClient(self.namenodes, use_trash)
def execute(self):
if self.args.help:
#if 'ls -H' is called, execute 'usage ls'
self.args.arg = [self.cmd]
return Commands.methods['usage']['method'](self)
if not Commands.methods.get(self.cmd):
self.parser.print_help()
sys.exit(-1)
try:
return Commands.methods[self.cmd]['method'](self)
except Exception:
exitError(sys.exc_info())
def command(args="", descr="", allowed_opts="", visible=True, req_args=None):
def wrap(f):
Commands.methods[f.func_name] = {"method": f,
"args": args,
"descr": descr,
"allowed_opts": allowed_opts,
"visible": visible,
"req_args": req_args}
            return f  # keep the decorated function usable as a normal attribute
        return wrap
@command(visible=False)
def commands(self):
print "\n".join(sorted([k for k, v in Commands.methods.iteritems() if v['visible']]))
@command(args="[path]", descr="Used for command line completion", visible=False, req_args=['[dirs]'])
def complete(self):
self.args.summary = True
self.args.directory = False
self.args.recurse = False
self.args.human = False
try:
for line in self._listing():
print line.replace(" ", "\\\\ ")
except FileNotFoundException:
pass
@command(args="[paths]", descr="list a path", allowed_opts=["d", "R", "s", "h"], req_args=['[dirs]'])
def ls(self):
for line in self._listing():
print line
def _listing(self):
# Mimicking hadoop client behaviour
if self.args.directory:
include_children = False
recurse = False
include_toplevel = True
else:
include_children = True
include_toplevel = False
recurse = self.args.recurse
listing = self.client.ls(self.args.dir, recurse=recurse,
include_toplevel=include_toplevel,
include_children=include_children)
for line in format_listing(listing, json_output=self.args.json,
human_readable=self.args.human,
recursive=recurse,
summary=self.args.summary):
yield line
@command(args="[paths]", descr="create directories", req_args=['dir [dirs]'])
def mkdir(self):
creations = self.client.mkdir(self.args.dir)
for line in format_results(creations, json_output=self.args.json):
print line
@command(args="[paths]", descr="create directories and their parents", req_args=['dir [dirs]'])
def mkdirp(self):
creations = self.client.mkdir(self.args.dir, create_parent=True)
for line in format_results(creations, json_output=self.args.json):
print line
@command(args="<owner:grp> [paths]", descr="change owner", allowed_opts=["R"], req_args=['arg', 'dir [dirs]'])
def chown(self):
owner = self.args.single_arg
try:
mods = self.client.chown(self.args.dir, owner, recurse=self.args.recurse)
for line in format_results(mods, json_output=self.args.json):
print line
except FileNotFoundException:
exitError(sys.exc_info())
@command(args="<mode> [paths]", descr="change file mode (octal)", allowed_opts=["R"], req_args=['(int) arg', 'dir [dirs]'])
def chmod(self):
mode = int(str(self.args.single_int_arg), 8)
mods = self.client.chmod(self.args.dir, mode, recurse=self.args.recurse)
for line in format_results(mods, json_output=self.args.json):
print line
@command(args="<grp> [paths]", descr="change group", allowed_opts=["R"], req_args=['arg', 'dir [dirs]'])
def chgrp(self):
grp = self.args.single_arg
mods = self.client.chgrp(self.args.dir, grp, recurse=self.args.recurse)
for line in format_results(mods, json_output=self.args.json):
print line
@command(args="[paths]", descr="display stats for paths", allowed_opts=['h'], req_args=['[dirs]'])
def count(self):
counts = self.client.count(self.args.dir)
for line in format_counts(counts, json_output=self.args.json,
human_readable=self.args.human):
print line
@command(args="", descr="display fs stats", allowed_opts=['h'])
def df(self):
result = self.client.df()
for line in format_fs_stats(result, json_output=self.args.json,
human_readable=self.args.human):
print line
@command(args="[paths]", descr="display disk usage statistics", allowed_opts=["s", "h"], req_args=['[dirs]'])
def du(self):
if self.args.summary:
include_children = False
include_toplevel = True
else:
include_children = True
include_toplevel = False
result = self.client.du(self.args.dir, include_toplevel=include_toplevel, include_children=include_children)
for line in format_du(result, json_output=self.args.json, human_readable=self.args.human):
print line
@command(args="[paths] dst", descr="move paths to destination", req_args=['dir [dirs]', 'arg'])
def mv(self):
paths = self.args.dir
dst = self.args.single_arg
result = self.client.rename(paths, dst)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="[paths]", descr="remove paths", allowed_opts=["R", "S", "T"], req_args=['dir [dirs]'])
def rm(self):
result = self.client.delete(self.args.dir, recurse=self.args.recurse)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="[paths]", descr="creates a file of zero length", req_args=['dir [dirs]'])
def touchz(self):
result = self.client.touchz(self.args.dir)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="", descr="show server information")
def serverdefaults(self):
print self.client.serverdefaults()
@command(args="[dirs]", descr="delete a directory", req_args=['dir [dirs]'])
def rmdir(self):
result = self.client.rmdir(self.args.dir)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="<rep> [paths]", descr="set replication factor", allowed_opts=['R'], req_args=['(int) arg', 'dir [dirs]'])
def setrep(self):
rep_factor = int(self.args.single_int_arg)
result = self.client.setrep(self.args.dir, rep_factor, recurse=self.args.recurse)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="<cmd>", descr="show cmd usage", req_args=['[args]'])
def usage(self):
if not 'arg' in self.args or self.args.arg == []:
self.parser.print_help()
sys.exit(-1)
for sub_cmd in self.args.arg:
self.usage_helper(sub_cmd)
def usage_helper(self, command):
cmd_entry = Commands.methods.get(command)
if not cmd_entry:
self.parser.print_help()
sys.exit(-1)
cmd_args = []
cmd_descriptions = "\ncommand options: \n"
allowed_opts = cmd_entry.get('allowed_opts')
if allowed_opts:
cmd_args += ["[-%s]" % o for o in allowed_opts]
cmd_descriptions += "\n".join(sorted([" %-30s %s" % ("%s %s" % (self.SUB_OPTS[o]['short'], self.SUB_OPTS[o]['long']), self.SUB_OPTS[o]['help']) for o in allowed_opts]))
args = cmd_entry.get('args')
if args:
cmd_args.append(args)
print "usage: snakebite [general options] %s %s" % (command, " ".join(cmd_args))
general_opts = "\ngeneral options:\n"
general_opts += "\n".join(sorted([" %-30s %s" % ("%s %s" % (v['short'], v['long']), v['help']) for k, v in self.GENERIC_OPTS.iteritems()]))
print general_opts
if allowed_opts:
print cmd_descriptions
@command(args="[paths]", descr="stat information", req_args=['dir [dirs]'])
def stat(self):
print format_stat(self.client.stat(self.args.dir), json_output=self.args.json)
@command(args="path", descr="test a path", allowed_opts=['d', 'z', 'e'], req_args=['arg'])
def test(self):
path = self.args.single_arg
try:
result = self.client.test(path, exists=self.args.exists, directory=self.args.directory, zero_length=self.args.zero)
except FileNotFoundException:
result = False
if result:
sys.exit(0)
else:
sys.exit(1)
@command(args="[paths]", descr="copy source paths to stdout", allowed_opts=['checkcrc'], req_args=['dir [dirs]'])
def cat(self):
for file_to_read in self.client.cat(self.args.dir, check_crc=self.args.checkcrc):
for load in file_to_read:
sys.stdout.write(load)
@command(args="path dst", descr="copy local file reference to destination", req_args=['dir [dirs]', 'arg'], visible=False)
def copyFromLocal(self):
src = self.args.dir
dst = self.args.single_arg
result = self.client.copyFromLocal(src, dst)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="[paths] dst", descr="copy paths to local file system destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'])
def copyToLocal(self):
paths = self.args.dir
dst = self.args.single_arg
result = self.client.copyToLocal(paths, dst, check_crc=self.args.checkcrc)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="[paths] dst", descr="copy files from source to destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'], visible=False)
def cp(self):
paths = self.args.dir
dst = self.args.single_arg
result = self.client.cp(paths, dst, checkcrc=self.args.checkcrc)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="file dst", descr="copy files to local file system destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'])
def get(self):
paths = self.args.dir
dst = self.args.single_arg
result = self.client.copyToLocal(paths, dst, check_crc=self.args.checkcrc)
for line in format_results(result, json_output=self.args.json):
print line
@command(args="dir dst", descr="concatenates files in source dir into destination local file", allowed_opts=['nl'], req_args=['src dst'])
def getmerge(self):
source = self.args.src_dst[0]
dst = self.args.src_dst[1]
result = self.client.getmerge(source, dst, newline=self.args.newline)
for line in format_results(result, json_output=self.args.json):
print line
# @command(args="[paths] dst", descr="copy sources from local file system to destination", req_args=['dir [dirs]', 'arg'])
# def put(self):
# paths = self.args.dir
# dst = self.args.single_arg
# result = self.client.put(paths, dst)
# for line in format_results(result, json_output=self.args.json):
# print line
@command(args="path", descr="display last kilobyte of the file to stdout", allowed_opts=['f'], req_args=['arg'])
def tail(self):
path = self.args.single_arg
result = self.client.tail(path, append=self.args.append)
for line in result:
print line
@command(args="path [paths]", descr="output file in text format", allowed_opts=['checkcrc'], req_args=['dir [dirs]'])
def text(self):
paths = self.args.dir
result = self.client.text(paths)
for line in result:
print line
| dgoldin/snakebite | snakebite/commandlineparser.py | Python | apache-2.0 | 31,673 |
# Copyright (c) 2016 RainMachine, Green Electronics LLC
# All rights reserved.
# Authors: Nicu Pavel <npavel@mini-box.com>
# Ciprian Misaila <ciprian.misaila@mini-box.com>
import urllib2, os, ssl, json
from RMUtilsFramework.rmLogging import log
class RMAPIClientProtocol:
"""
RainMachine currently supported protocols
"""
HTTPS = 1
HTTP = 2
@staticmethod
def getAsString(protocol):
if protocol == RMAPIClientProtocol.HTTPS:
return "https://"
return "http://"
class RMAPIClientCalls(object):
"""
RainMachine currently supported methods
"""
def __init__(self, restHandler):
self.GET = restHandler.get
self.POST = restHandler.post
self.REST = restHandler.rest
@classmethod
def callList(cls):
return [attr for attr in dir(cls) if not callable(attr) and not attr.startswith("__") and not attr == "callList"]
class RMAPIClientErrors:
"""
RainMachine client errors and status codes
"""
REQ = {"statusCode": 900, "message": "Can't create request object"}
OPEN = {"statusCode": 901, "message": "Can't open URL"}
JSON = {"statusCode": 902, "message": "Can't parse JSON"}
ID = {"statusCode": 903, "message": "No ID specified"}
PARAMS = {"statusCode": 904, "message": "No parameters specified"}
class RMAPIClientREST(object):
"""
RainMachine REST interface"
"""
def get(self, apiCall, isBinary = False, extraHeaders = None, asJSON = True):
return self.__rest("GET", apiCall, None, isBinary, extraHeaders, self._majorversion, asJSON)
def post(self, apiCall, data = None, isBinary = False, extraHeaders = None, asJSON = True):
return self.__rest("POST", apiCall, data, isBinary, extraHeaders, self._majorversion, asJSON)
def rest(self, type, apiCall, data = None, isBinary = False, extraHeaders = None, asJSON = True):
return self.__rest(type, apiCall, data, isBinary, extraHeaders, self._majorversion, asJSON)
def __rest(self, type, apiCall, data = None, isBinary = False, extraHeaders = None, majorVersion="", asJSON = True):
protocol = RMAPIClientProtocol.getAsString(self._protocol)
apiUrl = protocol + self._host + ":" + self._port + "/api/" + majorVersion + "/"
if self.token is None:
url = apiUrl + apiCall
else:
url = apiUrl + apiCall + "?access_token=" + self.token
try:
req = urllib2.Request(url)
req.get_method = lambda: type # Force GET/POST depending on type
except:
return RMAPIClientErrors.REQ
if data is not None:
if isBinary:
req.add_data(data=data)
else:
req.add_data(data=json.dumps(data))
req.add_header("Content-type", "text/plain")
req.add_header('User-Agent', "RMAPIClient")
        if extraHeaders is not None:
            # assumes extraHeaders maps header names to values; urllib2's
            # add_header() requires both the name and the value
            for header, value in extraHeaders.items():
                req.add_header(header, value)
try:
log.info("REST: %s : %s" % (req.get_method(), req.get_full_url()))
if self.context is not None:
r = urllib2.urlopen(req, context=self.context)
else:
r = urllib2.urlopen(req)
data = r.read()
except Exception, e:
log.error("Cannot OPEN URL: %s" % e)
return RMAPIClientErrors.OPEN
if asJSON:
try:
data = json.loads(data)
return data
except:
log.info("Cannot convert reply to JSON.")
return RMAPIClientErrors.JSON
return data
def __getApiVer(self):
data = self.__rest("GET", "apiVer")
if data is not None:
return data.get('apiVer', None)
return None
def __getContext(self):
try:
return ssl._create_unverified_context()
except:
return None
def __init__(self, host="127.0.0.1", port="8080", protocol=RMAPIClientProtocol.HTTPS):
self.token = None
self._host = host
self._port = port
self._protocol = protocol
self._apiversion = None
self._majorversion = ""
self._minorversion = ""
self._patchversion = ""
self.context = self.__getContext()
self.apiversion = self.__getApiVer()
@property
def apiversion(self):
if self._apiversion is None:
self._apiversion = self.__getApiVer()
return self._apiversion
@apiversion.setter
def apiversion(self, value):
if value is not None:
self._apiversion = value
self._majorversion, self._minorversion, self._patchversion = self._apiversion.split(".")
if __name__ == "__main__":
assert RMAPIClientREST("127.2.0.1", "8180").apiversion is None
assert RMAPIClientREST("127.0.0.1", "8080").apiversion == "4.3.0"
| sprinkler/rainmachine-developer-resources | api-python/API4Client/rmAPIClientREST.py | Python | gpl-3.0 | 4,980 |
from main.db import *
from random import randint
class Game():
save = {'savefile': 0, 'firsttime': True}
hiredActors = []
filmedMovies = []
def setDefParams(self):
for x in range(len(defvalnames)):
self.save[defvalnames[x]] = defvalvals[x]
class Actor(object):
def __init__(self):
if randint(0,1) == 1:
self.name = female_actor_names[randint(0,len(female_actor_names)-1)]
self.gender = cat_gen[1]
self.bra = cat_bra[randint(0, len(cat_bra)-2)]
else:
self.name = male_actor_names[randint(0,len(male_actor_names)-1)]
self.gender = cat_gen[0]
self.bra = cat_bra[len(cat_bra)-1] #2 if male
self.surname = actor_surnames[randint(0,len(actor_surnames)-1)]
self.full_name = self.name + ' ' + self.surname
self.race = cat_race[randint(0,len(cat_race)-1)]
self.age = randint(18,40)
self.cats = ['0', '1', '2']
self.cats[0] = cat_cat[randint(0, len(cat_cat)-2)]
self.cats[1] = cat_cat[randint(0, len(cat_cat)-2)]
while self.cats[0] == self.cats[1]:
self.cats[1] = cat_cat[randint(0, len(cat_cat)-2)]
self.cats[2] = cat_cat[randint(0, len(cat_cat)-2)]
while self.cats[0] == self.cats[2] or self.cats[1] == self.cats[2]:
self.cats[2] = cat_cat[randint(0, len(cat_cat)-2)]
self.ori = cat_ori[randint(0,len(cat_ori)-1)]
self.week_hired = Game.save['week']
self.month_hired = Game.save['month']
self.year_hired = Game.save['year']
self.date_hired = 'Y' + str(self.year_hired) + 'M' + str(self.month_hired) + 'W' + str(self.week_hired)
self.charm = randint(20, 100)
self.looks = randint(20, 100)
self.voice = randint(20, 100)
self.fee = self.feeCalc(self.charm, self.looks, self.voice)
#300-6600
#150-6750
#randint(100, 2000) #max999,999xxx
def feeCalc(self,c, l, v):
###
#age should factor
#so does gender
###
#20*5 + 20*5 + 20*5 = 300
#100*5 + 100*5 + 100*5 = 1500
#if a stat > 75, *8
#if 2 stats > 75, *10
#if 3 stats > 75, *12
#if a stats == 100, *15
#if 2 stats == 100, *18
#if 3 stats == 100, *22
if c == 100 and l == 100 and v == 100:
result = (c+l+v)*22
elif c == 100 and l == 100:
result = (c+l+v)*18
elif c == 100 and v == 100:
result = (c+l+v)*18
elif l == 100 and v == 100:
result = (c+l+v)*18
elif c == 100 or l == 100 or v == 100:
result = (c+l+v)*15
elif c >= 75 and l >= 75 and v >=75:
result = (c+l+v)*12
elif c >= 75 and l >= 75:
result = (c+l+v)*10
elif c >= 75 and v >= 75:
result = (c+l+v)*10
elif l >= 75 and v >= 75:
result = (c+l+v)*10
elif c >= 75 or l >= 75 or v >= 75:
result = (c+l+v)*8
else:
result = (c+l+v)*5
return result + randint(-150, 150)
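
    # Worked examples of the fee tiers above (illustrative inputs, shown before
    # the final randint(-150, 150) jitter is added):
    #   feeCalc(50, 60, 70)    -> no stat >= 75       -> (50+60+70) * 5  = 900
    #   feeCalc(80, 80, 60)    -> two stats >= 75     -> (80+80+60) * 10 = 2200
    #   feeCalc(100, 100, 100) -> all three stats 100 -> 300 * 22        = 6600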
class Movie(object):
def __init__(self, title, cats, actors, budget, rating):
self.title = title
self.cats = cats
self.actors = actors
self.budget = budget
self.rating = rating
self.sales = 0
self.fans = 0 | kittenparry/kittyadultstudiogame | main/game.py | Python | mit | 3,795 |
# -*- coding: utf-8 -*-
def obj2dict(obj):
memberlist = [m for m in dir(obj)]
_dict = {}
for m in memberlist:
if m[0] != "_" and not callable(m):
_dict[m] = getattr(obj, m)
return _dict
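

if __name__ == "__main__":
    # Minimal runnable sketch; DummyConfig is a made-up example class, not part of Nitrate.
    class DummyConfig(object):
        host = "localhost"
        port = 8000

        def ping(self):
            return True

    # Callables and "_"-prefixed names are skipped, leaving only plain attributes.
    print(obj2dict(DummyConfig()))  # -> {'host': 'localhost', 'port': 8000}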
| ShaolongHu/Nitrate | tcms/core/utils/obj2dict.py | Python | gpl-2.0 | 223 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ListApplications
# Returns a list of Twilio applications associated with your account.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListApplications(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListApplications Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Twilio/Applications/ListApplications')
def new_input_set(self):
return ListApplicationsInputSet()
def _make_result_set(self, result, path):
return ListApplicationsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListApplicationsChoreographyExecution(session, exec_id, path)
class ListApplicationsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListApplications
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
InputSet._set_input(self, 'AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
InputSet._set_input(self, 'AuthToken', value)
def set_FriendlyName(self, value):
"""
Set the value of the FriendlyName input for this Choreo. ((optional, string) Only return applications with friendly names that exactly match this name.)
"""
InputSet._set_input(self, 'FriendlyName', value)
def set_PageSize(self, value):
"""
Set the value of the PageSize input for this Choreo. ((optional, integer) The number of results per page.)
"""
InputSet._set_input(self, 'PageSize', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to retrieve. Defaults to 0.)
"""
InputSet._set_input(self, 'Page', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
InputSet._set_input(self, 'ResponseFormat', value)
class ListApplicationsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListApplications Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class ListApplicationsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListApplicationsResultSet(response, path)
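

# Illustrative usage sketch. The credentials below are placeholders, and
# TembooSession / execute_with_results() are assumed from general Temboo SDK
# conventions rather than from anything defined in this file:
#
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = ListApplications(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccountSID("ACxxxxxxxxxxxxxxxx")
#   inputs.set_AuthToken("your-auth-token")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())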
| egetzel/wecrow | truehand2014/temboo/Library/Twilio/Applications/ListApplications.py | Python | apache-2.0 | 3,715 |
import bento.console as console
from sqlalchemy import create_engine, orm, exc, desc
def connect(file='database/system.db', register=[]):
engine = None
session = None
try:
engine = create_engine('sqlite:///{}'.format(file))
session = orm.scoped_session(orm.sessionmaker(bind=engine))()
for cls in register:
cls.register(engine)
except exc.SQLAlchemyError as e:
console.pp("Error starting database connection: {}".format(e), 'error')
return False, False
return engine, session | chefhasteeth/BentoBot | database/helpers.py | Python | bsd-3-clause | 553 |
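

# Illustrative usage sketch. `SomeModel` is hypothetical; the only requirements
# implied above are that every class passed in `register` exposes a
# register(engine) classmethod, and that (False, False) signals a failed connection:
#
#   engine, session = connect('database/system.db', register=[SomeModel])
#   if engine:
#       rows = session.query(SomeModel).all()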
import logging
import json
from ferris.core.template import render_template
def generic_handler(code, template=None):
if not template:
template = "%s" % code
template = ('errors/%s.html' % template, 'errors/500.html')
def inner(request, response, exception):
logging.exception(exception)
response.set_status(code)
        if 'application/json' in request.headers.get('Accept', '') or request.headers.get('Content-Type') == 'application/json':
response.text = unicode(json.dumps({
'error': str(exception),
'code': code
}, encoding='utf-8', ensure_ascii=False))
else:
response.content_type = 'text/html'
response.text = render_template(template, {'request': request, 'exception': exception})
return inner
handle_400 = generic_handler(400)
handle_401 = generic_handler(401)
handle_403 = generic_handler(403, '401')
handle_404 = generic_handler(404)
handle_500 = generic_handler(500)
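
# Illustrative wiring sketch (Ferris sits on top of webapp2; the `app` object and
# its error_handlers mapping are assumed here, not defined in this module):
#   app.error_handlers[404] = handle_404
#   app.error_handlers[500] = handle_500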
| yowmamasita/social-listener-exam | ferris/controllers/errors.py | Python | mit | 1,015 |
# -*- coding:utf-8 -*-
'''
Created on 2013-3-6
@author: corleone
Statistical standards
'''
from scrapy.spider import BaseSpider
class Tjbz_Spider(BaseSpider):
name = "nbsc"
url = "http://www.stats.gov.cn/tjbz/"
start_urls = [
"http://www.stats.gov.cn/tjbz/", # 2002-12-31
]
| 535521469/crawler_sth | scrapy/nbsc/spiders/tjbz_spider.py | Python | bsd-3-clause | 311 |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
class Client(BaseClient):
def __init__(self, server_url, verify, proxy, headers, auth):
super().__init__(base_url=server_url, verify=verify, proxy=proxy, headers=headers, auth=auth)
def osv_get_vuln_by_id_request(self, id_):
headers = self._headers
response = self._http_request('get', f'v1new/vulns/{id_}', headers=headers)
return response
def osv_query_affected_by_commit_request(self, v1query_commit):
data = assign_params(commit=v1query_commit)
headers = self._headers
response = self._http_request('post', 'v1new/query', json_data=data, headers=headers)
return response
def osv_query_affected_by_package_request(self, v1query_version, v1query_package, v1query_ecosystem):
data = assign_params(version=v1query_version, package={'name': v1query_package, 'ecosystem': v1query_ecosystem})
headers = self._headers
response = self._http_request('post', 'v1new/query', json_data=data, headers=headers)
return response
def osv_get_vuln_by_id_command(client: Client, args: Dict[str, Any]) -> CommandResults:
id_ = str(args.get('id_', ''))
response = client.osv_get_vuln_by_id_request(id_)
command_results = CommandResults(
outputs_prefix='OSV.Vulnerability',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def osv_query_affected_by_commit_command(client: Client, args: Dict[str, Any]) -> CommandResults:
v1query_commit = str(args.get('commit', ''))
response = client.osv_query_affected_by_commit_request(v1query_commit)
if response:
command_results = CommandResults(
outputs_prefix='OSV.VulnerabilityList',
outputs_key_field='',
outputs=response['vulns'],
raw_response=response
)
else:
return_error("Please check if the value provided is correct")
return command_results
def osv_query_affected_by_package_command(client: Client, args: Dict[str, Any]) -> CommandResults:
v1query_version = str(args.get('version', ''))
v1query_package = str(args.get('packageName', ''))
v1query_ecosystem = str(args.get('ecosystem', ''))
response = client.osv_query_affected_by_package_request(v1query_version, v1query_package, v1query_ecosystem)
if response:
command_results = CommandResults(
outputs_prefix='OSV.VulnerabilityList',
outputs_key_field='',
outputs=response['vulns'],
raw_response=response
)
else:
return_error("Please check if the value provided is correct")
return command_results
def test_module(client: Client) -> None:
try:
client.osv_get_vuln_by_id_request("OSV-2020-111")
except Exception as e:
if 'Bug not found' in str(e):
return_error("Please check if the vulnerability OSV-2020-111 still exists")
else:
raise e
return_results('ok')
def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url')
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers: Dict[str, Any] = {}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'osv-get-vuln-by-id': osv_get_vuln_by_id_command,
'osv-query-affected-by-commit': osv_query_affected_by_commit_command,
'osv-query-affected-by-package': osv_query_affected_by_package_command
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| demisto/content | Packs/OpenSourceVulnerabilities/Integrations/OSV/OSV.py | Python | mit | 4,275 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
class FrogPond:
def __init__(self, numFrogs, numToads):
self.numFrogs = numFrogs
self.numToads = numToads
def count(self):
return self.numFrogs + self.numToads
class SenderPond(FrogPond, pb.Copyable):
def getStateToCopy(self):
d = self.__dict__.copy()
d['frogsAndToads'] = d['numFrogs'] + d['numToads']
del d['numFrogs']
del d['numToads']
return d
class ReceiverPond(pb.RemoteCopy):
def setCopyableState(self, state):
self.__dict__ = state
def count(self):
return self.frogsAndToads
pb.setUnjellyableForClass(SenderPond, ReceiverPond)
| waseem18/oh-mainline | vendor/packages/twisted/doc/core/howto/listings/pb/copy2_classes.py | Python | agpl-3.0 | 765 |
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Takes a .cov file and outputs annotated source files.
Unlike most code in gfauto, we use str instead of pathlib.Path because the increased speed is probably worthwhile.
"""
import argparse
import pickle
import sys
from gfauto import cov_util
def main() -> None:
parser = argparse.ArgumentParser(
description="Takes a .cov file and outputs annotated source files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--coverage_out",
type=str,
help="The output directory for source files annotated with line coverage.",
default="",
)
parser.add_argument(
"--zero_coverage_out",
type=str,
help="The output directory for source files annotated with line coverage, assuming zero coverage.",
default="",
)
parser.add_argument("--cov", type=str, help="The .cov file.", default="output.cov")
parser.add_argument(
"build_dir",
type=str,
help="The build directory where the compiler was invoked.",
)
parsed_args = parser.parse_args(sys.argv[1:])
coverage_out: str = parsed_args.coverage_out
zero_coverage_out: str = parsed_args.zero_coverage_out
coverage_file: str = parsed_args.cov
build_dir: str = parsed_args.build_dir
with open(coverage_file, mode="rb") as f:
line_counts: cov_util.LineCounts = pickle.load(f)
if coverage_out:
cov_util.output_source_files(build_dir, coverage_out, line_counts)
if zero_coverage_out:
cov_util.output_source_files(
build_dir, zero_coverage_out, line_counts, force_zero_coverage=True
)
if __name__ == "__main__":
main()
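    # Example invocation (paths and directory names are illustrative):
    #   python cov_to_source.py --cov output.cov --coverage_out cov_annotated /path/to/build
    # writes annotated copies of the compiled sources under cov_annotated/;
    # --zero_coverage_out does the same but assumes zero coverage everywhere.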
| google/graphicsfuzz | gfauto/gfauto/cov_to_source.py | Python | apache-2.0 | 2,337 |
# -*- coding: utf-8 -*-
# This file is part of FOSSAds.
# Copyright (c) 2011 Marius Voilă
# FOSSAds is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
# FOSSAds is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with FOSSAds. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render_to_response
from django.template import RequestContext
from ads.models import Article
def get_post(request, slug):
'''Return a given post
'''
post = Article.objects.get(slug=slug)
return render_to_response('article.html', {
'post':post,
},
context_instance=RequestContext(request))
| mariusv/FOSSAds | ads/blog.py | Python | agpl-3.0 | 1,092 |
from django.db import models
from machines.models import Machine
from django.contrib.auth.models import User
class Object(models.Model):
name = models.CharField(max_length=20)
description = models.TextField(blank=True)
model_url = models.URLField()
gcode = models.TextField()
owner = models.ForeignKey(User)
#TODO: Put a link to a profile tag for gcode
def __unicode__(self):
return self.name
class Job(models.Model):
#guid for the job
machine = models.ForeignKey(Machine)
user = models.ForeignKey(User)
started_on = models.DateTimeField()
finished_on = models.DateTimeField()
eta = models.IntegerField() #estimated runtime in ms
#option fields (support, color, material)
| debben/RepPREP | editor/models.py | Python | lgpl-3.0 | 750 |
# Copyright (C) 2020 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''Dax Sherpa Model Editor
Provides a simple GUI to edit model parameters
'''
from tkinter import Tk, StringVar, IntVar, END, font, messagebox
from tkinter.ttk import Frame, Button, Label, LabelFrame, Entry
from tkinter.ttk import Checkbutton, Style
import subprocess as sp
__all__ = ("DaxModelEditor", "DaxCancel")
class DaxCancel(Exception):
"Raised when the Cancel button is pressed"
def __init__(self, message):
super().__init__(message)
self.message = message
class DaxModelEditor():
'''A simple GUI to edit sherpa model parameters.
    The GUI is simple. Each model component is in its own
    LabelFrame, and each model parameter is one row in the
    UI.
When the parameter value is edited, the text turns red. When
the user hits return, the value is set.
Note: really no specific sherpa code in here. Just setting
an object's .val property (and having freeze|thaw methods).
'''
def __init__(self, list_of_model_components, xpa_access_point=None,
hide_plot_button=False, xlabel=None, ylabel=None):
'''Create a new Tk window for the editor.
The user supplies a list of sherpa model components.
'''
self.xpa = xpa_access_point
self.win = Tk()
self.win.title("DAX Sherpa Model Editor")
sty = Style(self.win)
sty.theme_use("clam")
self.row = 0
self.model_parameters = []
for mdl in list_of_model_components:
self.add_model_component(mdl)
self.add_buttons(hide_plot_button)
self.cancel_clicked = False
self.x_label = xlabel
self.y_label = ylabel
def add_model_component(self, sherpa_model_component):
'''Create UI elements for model component.
Each component is a separate LabelFrame. Each
model parameter is a row within that frame.
'''
self.sherpa_model = sherpa_model_component
lfrm = LabelFrame(self.get_win(),
text=sherpa_model_component.name)
lfrm.grid(row=self.get_row(), column=0, columnspan=1,
padx=(10, 10), pady=(10, 10))
self.next_row()
# Repeat column headers in each model component
self.add_column_headers(lfrm)
for par in self.sherpa_model.pars:
mod_par = DaxModelParameter(self, lfrm, par)
self.model_parameters.append(mod_par)
self.next_row()
def add_buttons(self, hide_plot_button):
'''Add the buttons at the bottom of the UI'''
myfrm = Frame(self.get_win())
myfrm.grid(row=self.get_row(), column=0, pady=(5, 5))
abtn = Button(myfrm, text="Fit", command=self.fit)
abtn.grid(row=self.get_row(), column=0, columnspan=1,
padx=(20, 20), pady=(5, 5))
if hide_plot_button is False:
abtn = Button(myfrm, text="Plot", command=self.plot)
abtn.grid(row=self.get_row(), column=1, columnspan=1,
padx=(20, 20), pady=(5, 5))
abtn = Button(myfrm, text="Conf", command=self.conf)
abtn.grid(row=self.get_row(), column=2, columnspan=1,
padx=(20, 20), pady=(5, 5))
abtn = Button(myfrm, text="Quit", command=self.quit)
abtn.grid(row=self.get_row(), column=3, columnspan=1,
padx=(20, 20), pady=(5, 5))
abtn = Button(myfrm, text="Reset", command=self.reset)
abtn.grid(row=self.get_row(), column=4, columnspan=1,
padx=(20, 20), pady=(5, 5))
abtn = Button(myfrm, text="Cancel", command=self.cancel)
abtn.grid(row=self.get_row(), column=5, columnspan=1,
padx=(20, 20), pady=(5, 5))
def add_column_headers(self, lab_frame):
'''Add the labels for the columns. This needs to be in
sync with the DaxModelParameter.render_ui() method.
'''
row = self.get_row()
stt = Style()
lfont = stt.lookup("TLabel", "font")
basefont = font.nametofont(lfont)
stt.configure("Hdr.TLabel",
font=(basefont.cget("family"),
basefont.cget("size"),
"bold underline"))
cols = ["Parameter", "Value", "Frozen?", "Min", "Max", "Units"]
for col, txt in enumerate(cols):
label = Label(lab_frame, text=txt, style="Hdr.TLabel")
label.grid(row=row, column=col)
self.next_row()
def get_win(self):
'Return window object'
return self.win
def get_row(self):
'Return the current row in the UI'
return self.row
def next_row(self):
'Increment row in the UI'
self.row = self.row+1
def run(self, fit_command, conf_command=None):
'Start the event loop'
from os import environ
if 'DAXNOGUI' in environ:
return
self.fit_command = fit_command
self.conf_command = conf_command
self.win.mainloop()
        # note to self: exceptions raised in the event loop are caught
        # and not raised to the calling application, so set a flag here
        # and raise the exception after the loop exits.
if self.cancel_clicked is True:
raise DaxCancel("Cancel Button Pressed")
def conf(self):
'Run confidence command'
from sherpa.utils.err import EstErr
if self.conf_command:
try:
self.conf_command()
except EstErr as mybad:
messagebox.showerror("DAX Model Editor", str(mybad))
def fit(self):
'''Go ahead and fit the data
'''
try:
self.fit_command()
except Exception as mybad:
messagebox.showerror("DAX Model Editor", str(mybad))
self.update()
def quit(self):
'Continue on with rest of script'
self.win.quit()
self.win.destroy()
def reset(self):
"Restore all values back to initial values"
for modpar in self.model_parameters:
modpar.reset()
def update(self):
"Update all values "
for modpar in self.model_parameters:
try:
modpar.update()
except:
pass
def cancel(self):
'''Stop the event loop and set cancel flag'''
self.win.quit()
self.cancel_clicked = True
@staticmethod
def xpaget(ds9, cmd):
"Run xpaget and return string"
runcmd = ["xpaget", ds9]
runcmd.extend(cmd.split(" "))
try:
out = sp.run(runcmd, check=False, stdout=sp.PIPE).stdout
except sp.CalledProcessError as sp_err:
raise RuntimeError("Problem getting '{}'.".format(runcmd) +
"Error message: {}".format(str(sp_err)))
return out.decode().strip()
def __del__(self):
"""Make sure ds9 plot window is closed"""
if self.xpa is None:
return
plots = self.xpaget(self.xpa, "plot") # Get a list of plots.
plots.split(" ")
if "dax_model_editor" in plots:
runcmd = ["xpaset", "-p", self.xpa, "plot",
"dax_model_editor", "close"]
sp.run(runcmd, check=False)
def plot(self):
'''Plot model with current parameters'''
import sherpa.astro.ui as sherpa
if self.xpa is None:
import matplotlib.pylab as plt
sherpa.plot_fit_delchi()
plt.show()
return
plots = self.xpaget(self.xpa, "plot") # Get a list of plots.
plots.split(" ")
newplot = ("dax_model_editor" not in plots)
_f = sherpa.get_fit_plot()
_d = _f.dataplot
_m = _f.modelplot
if _d.xerr is None:
_d.xerr = (_d.x-_d.x) # zeros
if self.x_label is None:
xlab = _f.dataplot.xlabel
else:
xlab = self.x_label
if self.y_label is None:
ylab = _f.dataplot.ylabel
else:
ylab = self.y_label
import dax.dax_plot_utils as dax_plot
if hasattr(_m, "xlo"):
mx = list(_m.xlo)
mx.append(_m.xhi[-1])
my = list(_m.y)
my.append(_m.y[-1])
step = True
else:
mx = _m.x
my = _m.y
step = False
dax_plot.blt_plot_model(self.xpa, mx, my,
"Dax Model Editor Plot",
xlab, ylab, step=step,
new=newplot, winname="dax_model_editor")
dax_plot.blt_plot_data(self.xpa, _d.x, _d.xerr/2.0, _d.y, _d.yerr)
delta = (_d.y-_m.y)/_d.yerr
ones = _d.yerr*0.0+1.0
dax_plot.blt_plot_delchisqr(self.xpa, _d.x, _d.xerr/2.0, delta,
ones, "")
class DaxModelParameter():
'''The UI elements and logic to set model parameter values.
For this application; all model parameters are assumed to be
floats (or ints cast to floats). Strings and Logicals need not
apply.
'''
def __init__(self, parent, label_frame, sherpa_model_parameter):
'''Create model parameter UI element'''
self.sherpa_par = sherpa_model_parameter
self.parent = parent
self.label_frame = label_frame
self.initial_value = {'val': self.sherpa_par.val,
'min': self.sherpa_par.min,
'max': self.sherpa_par.max}
self.render_ui()
def _freeze_thaw(self):
        '''ACTION: freeze() or thaw() the parameter based on the
        checkbox value.'''
if 1 == self.fz_box.get():
self.sherpa_par.freeze()
else:
self.sherpa_par.thaw()
@staticmethod
def __format_val(val):
'Format parameter values'
retval = "{:.5g}".format(val)
return retval
def reset(self):
"""Reset values to original"""
for field in ['max', 'min', 'val']:
to_mod = getattr(self, field)
to_mod.delete(0, END)
to_mod.insert(0, self.__format_val(self.initial_value[field]))
to_mod.configure(foreground="black")
setattr(self.sherpa_par, field, self.initial_value[field])
def update(self):
"""Reset values to original"""
for field in ['max', 'min', 'val']:
to_mod = getattr(self, field)
newval = getattr(self.sherpa_par, field)
to_mod.delete(0, END)
to_mod.insert(0, self.__format_val(newval))
# ~ to_mod.configure(foreground="black")
# ~ setattr(self.sherpa_par, field, self.initial_value[field])
def entry_callback(self, keyevt, field):
        '''ACTION: set the model parameter value when the user
        types <<Return>>. Otherwise, when the user edits the value
        it turns red so the user knows it hasn't been set yet.
All values are cast|set to doubles.
There is no validation in the UI against the min|max
values. Sherpa raises an exception if you try to go beyond
the limits so the color remains red until valid value is
entered.
'''
from sherpa.utils.err import ParameterErr
# Note: use .char instead of .keysym because Return
# and Enter on the keypad are different keysym's but both
# generate CR. This makes sense since can remap keyboard
# keys -- the action we want is CR, whichever key generates it.
# Update: Unfortunately the .char field is cleared
# in newer versions of python in the KeyRelease callback, and
# the Key callback doesn't work (order of callback doesn't change
# text color correctly). So, I'm back to using the keysym.
to_mod = getattr(self, field)
if keyevt.keysym in ['Return', 'KP_Enter', 'Enter']:
try:
fval = float(to_mod.get())
setattr(self.sherpa_par, field, fval)
to_mod.configure(foreground="black")
to_mod.last_value = to_mod.get()
except (ValueError, ParameterErr) as val_err:
messagebox.showerror("DAX Model Editor", str(val_err))
else:
if to_mod.get() != to_mod.last_value:
to_mod.configure(foreground="red")
def render_ui(self):
'''Render the parameter UI elements and attach bindings'''
row = self.parent.get_row()
win = self.label_frame
# The parameter name
lab = Label(win, text=self.sherpa_par.name,
width=12, anchor="e")
lab.grid(row=row, column=0, padx=(5, 5), pady=2)
# The current parameter value
self.val_str = StringVar()
self.val = Entry(win, textvariable=self.val_str,
foreground="black", width=12, justify="right")
self.val.grid(row=row, column=1, padx=(5, 5), pady=2)
self.val.delete(0, END)
self.val.insert(0, self.__format_val(self.sherpa_par.val))
self.val.last_value = self.val.get()
self.val.bind("<KeyRelease>",
lambda x: self.entry_callback(x, field='val'))
# Frozen|Thawed checkbox. Checked if frozen.
self.fz_box = IntVar()
if self.sherpa_par.frozen is True:
self.fz_box.set(1)
else:
self.fz_box.set(0)
fzbtn = Checkbutton(win, text="", variable=self.fz_box,
command=self._freeze_thaw)
fzbtn.grid(row=row, column=2, padx=(5, 5), pady=2)
# The min value
self.min_str = StringVar()
self.min = Entry(win, textvariable=self.min_str,
foreground="black", width=12, justify="right")
self.min.grid(row=row, column=3, padx=(5, 5), pady=2)
self.min.delete(0, END)
self.min.insert(0, self.__format_val(self.sherpa_par.min))
self.min.last_value = self.min.get()
self.min.bind("<KeyRelease>",
lambda x: self.entry_callback(x, field='min'))
# The max value
self.max_str = StringVar()
self.max = Entry(win, textvariable=self.max_str,
foreground="black", width=12, justify="right")
self.max.grid(row=row, column=4, padx=(5, 5), pady=2)
self.max.delete(0, END)
self.max.insert(0, self.__format_val(self.sherpa_par.max))
self.max.last_value = self.max.get()
self.max.bind("<KeyRelease>",
lambda x: self.entry_callback(x, field='max'))
# The units of the parameter
par_units = Label(win, text="{}".format(self.sherpa_par.units),
width=20, anchor="e")
par_units.grid(row=row, column=5, padx=(5, 5), pady=2)
def test_dax_if():
'''Test script'''
import sherpa.astro.ui as sherpa
sherpa.load_arrays(1, [1, 2, 3], [4, 5, 6], sherpa.Data1D)
sherpa.set_source("polynom1d.ply")
# DaxModelEditor([ply], "ds9").run()
DaxModelEditor([ply]).run(sherpa.fit)
if __name__ == '__main__':
test_dax_if()
| cxcsds/ciao-contrib | dax/dax_model_editor.py | Python | gpl-3.0 | 15,955 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Snowpyt'
copyright = '2020, S. Filhol, M. Lanzky'
author = 'S. Filhol, M. Lanzky'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store','.readthedocs.yml','README.md']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | ArcticSnow/snowpyt | docs/conf.py | Python | mit | 1,971 |
from behave import given


@given(u'the WOT ID exists')
def step_impl(context):
context.node_simulator.add_user("Me", insert=True)
@given(u'there are other blockchain users')
def step_impl(context):
context.node_simulator.add_user("Adam")
context.node_simulator.add_user("Bill")
context.node_simulator.add_user("Charlie")
@given(u'the blockchain exists')
def step_impl(context):
context.node_simulator.add_block(1)
| debbiedub/bcdef | features/steps/blockchain.py | Python | gpl-3.0 | 411 |
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
from tpot import TPOTClassifier, TPOTRegressor
from tpot.base import TPOTBase
from tpot.driver import float_range
from tpot.gp_types import Output_Array
from tpot.gp_deap import mutNodeReplacement, _wrapped_cross_val_score, pick_two_individuals_eligible_for_crossover, cxOnePoint, varOr, initialize_stats_dict
from tpot.metrics import balanced_accuracy, SCORERS
from tpot.operator_utils import TPOTOperatorClassFactory, set_sample_weight, source_decode
from tpot.decorators import pretest_X, pretest_y
from tpot.config.classifier import classifier_config_dict
from tpot.config.classifier_light import classifier_config_dict_light
from tpot.config.regressor_light import regressor_config_dict_light
from tpot.config.classifier_mdr import tpot_mdr_classifier_config_dict
from tpot.config.regressor_mdr import tpot_mdr_regressor_config_dict
from tpot.config.regressor_sparse import regressor_config_sparse
from tpot.config.classifier_sparse import classifier_config_sparse
import numpy as np
import pandas as pd
from scipy import sparse
import inspect
import random
import warnings
from multiprocessing import cpu_count
import os
from re import search
from datetime import datetime
from time import sleep
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.datasets import load_digits, load_boston
from sklearn.model_selection import train_test_split, cross_val_score, GroupKFold
from sklearn.externals.joblib import Memory
from sklearn.metrics import make_scorer, roc_auc_score
from deap import creator, gp
from deap.tools import ParetoFront
from nose.tools import assert_raises, assert_not_equal, assert_greater_equal, assert_equal, assert_in
from driver_tests import captured_output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from tqdm.autonotebook import tqdm
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Ensure we can use `with closing(...) as ... :` syntax
if getattr(StringIO, '__exit__', False) and \
getattr(StringIO, '__enter__', False):
def closing(arg):
return arg
else:
from contextlib import closing
# Set up the MNIST data set for testing
mnist_data = load_digits()
training_features, testing_features, training_target, testing_target = \
train_test_split(mnist_data.data.astype(np.float64), mnist_data.target.astype(np.float64), random_state=42)
# Set up test data with missing value
features_with_nan = np.copy(training_features)
features_with_nan[0][0] = float('nan')
# Set up the Boston data set for testing
boston_data = load_boston()
training_features_r, testing_features_r, training_target_r, testing_target_r = \
train_test_split(boston_data.data, boston_data.target, random_state=42)
# Set up pandas DataFrame for testing
input_data = pd.read_csv(
'tests/tests.csv',
sep=',',
dtype=np.float64,
)
pd_features = input_data.drop('class', axis=1)
pd_target = input_data['class']
# Set up the sparse matrix for testing
sparse_features = sparse.csr_matrix(training_features)
sparse_target = training_target
np.random.seed(42)
random.seed(42)
test_operator_key = 'sklearn.feature_selection.SelectPercentile'
TPOTSelectPercentile, TPOTSelectPercentile_args = TPOTOperatorClassFactory(
test_operator_key,
classifier_config_dict[test_operator_key]
)
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
def test_init_custom_parameters():
"""Assert that the TPOT instantiator stores the TPOT variables properly."""
tpot_obj = TPOTClassifier(
population_size=500,
generations=1000,
offspring_size=2000,
mutation_rate=0.05,
crossover_rate=0.9,
scoring='accuracy',
cv=10,
verbosity=1,
random_state=42,
disable_update_check=True,
warm_start=True
)
assert tpot_obj.population_size == 500
assert tpot_obj.generations == 1000
assert tpot_obj.offspring_size == 2000
assert tpot_obj.mutation_rate == 0.05
assert tpot_obj.crossover_rate == 0.9
assert tpot_obj.scoring_function == 'accuracy'
assert tpot_obj.cv == 10
assert tpot_obj.max_time_mins is None
assert tpot_obj.warm_start is True
assert tpot_obj.verbosity == 1
tpot_obj._fit_init()
assert tpot_obj._pop == []
assert tpot_obj._pareto_front == None
assert tpot_obj._last_optimized_pareto_front == None
assert tpot_obj._last_optimized_pareto_front_n_gens == 0
assert tpot_obj._optimized_pipeline == None
assert tpot_obj._optimized_pipeline_score == None
assert tpot_obj.fitted_pipeline_ == None
assert tpot_obj._exported_pipeline_text == []
def test_init_default_scoring():
"""Assert that TPOT intitializes with the correct default scoring function."""
tpot_obj = TPOTRegressor()
assert tpot_obj.scoring_function == 'neg_mean_squared_error'
tpot_obj = TPOTClassifier()
assert tpot_obj.scoring_function == 'accuracy'
def test_init_default_scoring_2():
"""Assert that TPOT intitializes with a valid customized metric function."""
with warnings.catch_warnings(record=True) as w:
tpot_obj = TPOTClassifier(scoring=balanced_accuracy)
tpot_obj._fit_init()
    assert len(w) == 1 # deap 1.2.2 warning message made this unit test fail
    assert issubclass(w[-1].category, DeprecationWarning) # deap 1.2.2 warning message made this unit test fail
    assert "This scoring type was deprecated" in str(w[-1].message) # deap 1.2.2 warning message made this unit test fail
assert tpot_obj.scoring_function == 'balanced_accuracy'
def test_init_default_scoring_3():
"""Assert that TPOT intitializes with a valid _BaseScorer."""
with warnings.catch_warnings(record=True) as w:
tpot_obj = TPOTClassifier(scoring=make_scorer(balanced_accuracy))
tpot_obj._fit_init()
    assert len(w) == 0 # deap 1.2.2 warning message made this unit test fail
assert tpot_obj.scoring_function == 'balanced_accuracy'
def test_init_default_scoring_4():
"""Assert that TPOT intitializes with a valid scorer."""
def my_scorer(clf, X, y):
return 0.9
with warnings.catch_warnings(record=True) as w:
tpot_obj = TPOTClassifier(scoring=my_scorer)
tpot_obj._fit_init()
    assert len(w) == 0 # deap 1.2.2 warning message made this unit test fail
assert tpot_obj.scoring_function == 'my_scorer'
def test_init_default_scoring_5():
"""Assert that TPOT intitializes with a valid sklearn metric function roc_auc_score."""
with warnings.catch_warnings(record=True) as w:
tpot_obj = TPOTClassifier(scoring=roc_auc_score)
tpot_obj._fit_init()
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "This scoring type was deprecated" in str(w[-1].message)
assert tpot_obj.scoring_function == 'roc_auc_score'
def test_init_default_scoring_6():
"""Assert that TPOT intitializes with a valid customized metric function in __main__"""
def my_scorer(y_true, y_pred):
return roc_auc_score(y_true, y_pred)
with warnings.catch_warnings(record=True) as w:
tpot_obj = TPOTClassifier(scoring=my_scorer)
tpot_obj._fit_init()
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "This scoring type was deprecated" in str(w[-1].message)
assert tpot_obj.scoring_function == 'my_scorer'
def test_invalid_score_warning():
"""Assert that the TPOT intitializes raises a ValueError when the scoring metrics is not available in SCORERS."""
# Mis-spelled scorer
tpot_obj = TPOTClassifier(scoring='balanced_accuray')
assert_raises(ValueError, tpot_obj._fit_init)
# Correctly spelled
tpot_obj = TPOTClassifier(scoring='balanced_accuracy')
def test_invalid_dataset_warning():
"""Assert that the TPOT fit function raises a ValueError when dataset is not in right format."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0
)
tpot_obj._fit_init()
# common mistake in target
bad_training_target = training_target.reshape((1, len(training_target)))
assert_raises(ValueError, tpot_obj.fit, training_features, bad_training_target)
def test_invalid_subsample_ratio_warning():
"""Assert that the TPOT intitializes raises a ValueError when subsample ratio is not in the range (0.0, 1.0]."""
# Invalid ratio
tpot_obj = TPOTClassifier(subsample=0.0)
assert_raises(ValueError, tpot_obj._fit_init)
# Valid ratio
TPOTClassifier(subsample=0.1)
def test_invalid_mut_rate_plus_xo_rate():
"""Assert that the TPOT intitializes raises a ValueError when the sum of crossover and mutation probabilities is large than 1."""
# Invalid ratio
tpot_obj = TPOTClassifier(mutation_rate=0.8, crossover_rate=0.8)
assert_raises(ValueError, tpot_obj._fit_init)
# Valid ratio
TPOTClassifier(mutation_rate=0.8, crossover_rate=0.1)
def test_init_max_time_mins():
"""Assert that the TPOT init stores max run time and sets generations to 1000000."""
tpot_obj = TPOTClassifier(max_time_mins=30, generations=1000)
tpot_obj._fit_init()
assert tpot_obj.generations == 1000000
assert tpot_obj.max_time_mins == 30
def test_init_n_jobs():
"""Assert that the TPOT init stores current number of processes."""
tpot_obj = TPOTClassifier(n_jobs=2)
assert tpot_obj.n_jobs == 2
tpot_obj = TPOTClassifier(n_jobs=-1)
assert tpot_obj.n_jobs == -1
tpot_obj._fit_init()
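    # n_jobs=-1 should resolve to the machine's CPU count during _fit_init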
assert tpot_obj._n_jobs == cpu_count()
def test_timeout():
"""Assert that _wrapped_cross_val_score return Timeout in a time limit."""
tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error')
tpot_obj._fit_init()
# a complex pipeline for the test
pipeline_string = (
"ExtraTreesRegressor("
"GradientBoostingRegressor(input_matrix, GradientBoostingRegressor__alpha=0.8,"
"GradientBoostingRegressor__learning_rate=0.1,GradientBoostingRegressor__loss=huber,"
"GradientBoostingRegressor__max_depth=5, GradientBoostingRegressor__max_features=0.5,"
"GradientBoostingRegressor__min_samples_leaf=5, GradientBoostingRegressor__min_samples_split=5,"
"GradientBoostingRegressor__n_estimators=100, GradientBoostingRegressor__subsample=0.25),"
"ExtraTreesRegressor__bootstrap=True, ExtraTreesRegressor__max_features=0.5,"
"ExtraTreesRegressor__min_samples_leaf=5, ExtraTreesRegressor__min_samples_split=5, "
"ExtraTreesRegressor__n_estimators=100)"
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
# test _wrapped_cross_val_score with cv=20 so that it is impossible to finish in 1 second
return_value = _wrapped_cross_val_score(tpot_obj.fitted_pipeline_,
training_features_r,
training_target_r,
cv=20,
scoring_function='neg_mean_squared_error',
sample_weight=None,
groups=None,
timeout=1)
assert return_value == "Timeout"
def test_invalid_pipeline():
"""Assert that _wrapped_cross_val_score return -float(\'inf\') with a invalid_pipeline"""
# a invalid pipeline
# Dual or primal formulation. Dual formulation is only implemented for l2 penalty.
pipeline_string = (
'LogisticRegression(input_matrix, LogisticRegression__C=10.0, '
'LogisticRegression__dual=True, LogisticRegression__penalty=l1)'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
    # the pipeline cannot be fitted (dual=True requires the l2 penalty), so the CV score should be -inf
return_value = _wrapped_cross_val_score(tpot_obj.fitted_pipeline_,
training_features,
training_target,
cv=5,
scoring_function='accuracy',
sample_weight=None,
groups=None,
timeout=300)
assert return_value == -float('inf')
def test_balanced_accuracy():
"""Assert that the balanced_accuracy in TPOT returns correct accuracy."""
y_true = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4])
y_pred1 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4])
y_pred2 = np.array([3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4])
accuracy_score1 = balanced_accuracy(y_true, y_pred1)
accuracy_score2 = balanced_accuracy(y_true, y_pred2)
assert np.allclose(accuracy_score1, 1.0)
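    # y_pred2 misclassifies every class-1 sample as class 3; with per-class
    # (sensitivity + specificity) / 2 averaged over the four classes this gives
    # (0.5 + 1.0 + 5/6 + 1.0) / 4 ~= 0.833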
assert np.allclose(accuracy_score2, 0.833333333333333)
def test_get_params():
"""Assert that get_params returns the exact dictionary of parameters used by TPOT."""
kwargs = {
'population_size': 500,
'generations': 1000,
'config_dict': 'TPOT light',
'offspring_size': 2000,
'verbosity': 1
}
tpot_obj = TPOTClassifier(**kwargs)
# Get default parameters of TPOT and merge with our specified parameters
initializer = inspect.getargspec(TPOTBase.__init__)
default_kwargs = dict(zip(initializer.args[1:], initializer.defaults))
default_kwargs.update(kwargs)
assert tpot_obj.get_params()['config_dict'] == 'TPOT light'
assert tpot_obj.get_params() == default_kwargs
def test_set_params():
"""Assert that set_params returns a reference to the TPOT instance."""
assert tpot_obj.set_params() is tpot_obj
def test_set_params_2():
"""Assert that set_params updates TPOT's instance variables."""
tpot_obj = TPOTClassifier(generations=2)
tpot_obj.set_params(generations=3)
assert tpot_obj.generations == 3
def test_TPOTBase():
"""Assert that TPOTBase class raises RuntimeError when using it directly."""
assert_raises(RuntimeError, TPOTBase)
def test_conf_dict():
"""Assert that TPOT uses the pre-configured dictionary of operators when config_dict is 'TPOT light' or 'TPOT MDR'."""
tpot_obj = TPOTClassifier(config_dict='TPOT light')
tpot_obj._fit_init()
assert tpot_obj._config_dict == classifier_config_dict_light
tpot_obj = TPOTClassifier(config_dict='TPOT MDR')
tpot_obj._fit_init()
assert tpot_obj._config_dict == tpot_mdr_classifier_config_dict
tpot_obj = TPOTClassifier(config_dict='TPOT sparse')
tpot_obj._fit_init()
assert tpot_obj._config_dict == classifier_config_sparse
tpot_obj = TPOTRegressor(config_dict='TPOT light')
tpot_obj._fit_init()
assert tpot_obj._config_dict == regressor_config_dict_light
tpot_obj = TPOTRegressor(config_dict='TPOT MDR')
tpot_obj._fit_init()
assert tpot_obj._config_dict == tpot_mdr_regressor_config_dict
tpot_obj = TPOTRegressor(config_dict='TPOT sparse')
tpot_obj._fit_init()
assert tpot_obj._config_dict == regressor_config_sparse
def test_conf_dict_2():
"""Assert that TPOT uses a custom dictionary of operators when config_dict is Python dictionary."""
tpot_obj = TPOTClassifier(config_dict=tpot_mdr_classifier_config_dict)
assert tpot_obj.config_dict == tpot_mdr_classifier_config_dict
def test_conf_dict_3():
"""Assert that TPOT uses a custom dictionary of operators when config_dict is the path of Python dictionary."""
tpot_obj = TPOTRegressor(config_dict='tests/test_config.py')
tpot_obj._fit_init()
tested_config_dict = {
'sklearn.naive_bayes.GaussianNB': {
},
'sklearn.naive_bayes.BernoulliNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.naive_bayes.MultinomialNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
}
}
assert isinstance(tpot_obj.config_dict, str)
assert isinstance(tpot_obj._config_dict, dict)
assert tpot_obj._config_dict == tested_config_dict
def test_read_config_file():
"""Assert that _read_config_file rasies FileNotFoundError with a wrong path."""
tpot_obj = TPOTRegressor()
# typo for "tests/test_config.py"
assert_raises(ValueError, tpot_obj._read_config_file, "tests/test_confg.py")
def test_read_config_file_2():
"""Assert that _read_config_file rasies ValueError with wrong dictionary format"""
tpot_obj = TPOTRegressor()
assert_raises(ValueError, tpot_obj._read_config_file, "tests/test_config.py.bad")
def test_read_config_file_3():
"""Assert that _read_config_file rasies ValueError without a dictionary named 'tpot_config'."""
tpot_obj = TPOTRegressor()
assert_raises(ValueError, tpot_obj._setup_config, "tpot/config/regressor_sparse.py")
def test_random_ind():
"""Assert that the TPOTClassifier can generate the same pipeline with same random seed."""
tpot_obj = TPOTClassifier(random_state=43)
tpot_obj._fit_init()
pipeline1 = str(tpot_obj._toolbox.individual())
tpot_obj = TPOTClassifier(random_state=43)
tpot_obj._fit_init()
pipeline2 = str(tpot_obj._toolbox.individual())
assert pipeline1 == pipeline2
def test_random_ind_2():
"""Assert that the TPOTRegressor can generate the same pipeline with same random seed."""
tpot_obj = TPOTRegressor(random_state=43)
tpot_obj._fit_init()
pipeline1 = str(tpot_obj._toolbox.individual())
tpot_obj = TPOTRegressor(random_state=43)
tpot_obj._fit_init()
pipeline2 = str(tpot_obj._toolbox.individual())
assert pipeline1 == pipeline2
def test_score():
"""Assert that the TPOT score function raises a RuntimeError when no optimized pipeline exists."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
assert_raises(RuntimeError, tpot_obj.score, testing_features, testing_target)
def test_score_2():
"""Assert that the TPOTClassifier score function outputs a known score for a fixed pipeline."""
tpot_obj = TPOTClassifier(random_state=34)
tpot_obj._fit_init()
known_score = 0.977777777778 # Assumes use of the TPOT accuracy function
# Create a pipeline with a known score
pipeline_string = (
'KNeighborsClassifier('
'input_matrix, '
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1, '
'KNeighborsClassifier__weights=uniform'
')'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
# Get score from TPOT
score = tpot_obj.score(testing_features, testing_target)
assert np.allclose(known_score, score)
def test_score_3():
"""Assert that the TPOTRegressor score function outputs a known score for a fixed pipeline."""
tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error', random_state=72)
tpot_obj._fit_init()
known_score = -11.682841148312662
# Reify pipeline with known score
pipeline_string = (
"ExtraTreesRegressor("
"GradientBoostingRegressor(input_matrix, GradientBoostingRegressor__alpha=0.8,"
"GradientBoostingRegressor__learning_rate=0.1,GradientBoostingRegressor__loss=huber,"
"GradientBoostingRegressor__max_depth=5, GradientBoostingRegressor__max_features=0.5,"
"GradientBoostingRegressor__min_samples_leaf=5, GradientBoostingRegressor__min_samples_split=5,"
"GradientBoostingRegressor__n_estimators=100, GradientBoostingRegressor__subsample=0.25),"
"ExtraTreesRegressor__bootstrap=True, ExtraTreesRegressor__max_features=0.5,"
"ExtraTreesRegressor__min_samples_leaf=5, ExtraTreesRegressor__min_samples_split=5, "
"ExtraTreesRegressor__n_estimators=100)"
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features_r, training_target_r)
# Get score from TPOT
score = tpot_obj.score(testing_features_r, testing_target_r)
assert np.allclose(known_score, score)
def test_sample_weight_func():
"""Assert that the TPOTRegressor score function outputs a known score for a fixed pipeline with sample weights."""
tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error')
tpot_obj._fit_init()
    # Reify pipeline with known score
pipeline_string = (
"ExtraTreesRegressor("
"GradientBoostingRegressor(input_matrix, GradientBoostingRegressor__alpha=0.8,"
"GradientBoostingRegressor__learning_rate=0.1,GradientBoostingRegressor__loss=huber,"
"GradientBoostingRegressor__max_depth=5, GradientBoostingRegressor__max_features=0.5,"
"GradientBoostingRegressor__min_samples_leaf=5, GradientBoostingRegressor__min_samples_split=5,"
"GradientBoostingRegressor__n_estimators=100, GradientBoostingRegressor__subsample=0.25),"
"ExtraTreesRegressor__bootstrap=True, ExtraTreesRegressor__max_features=0.5,"
"ExtraTreesRegressor__min_samples_leaf=5, ExtraTreesRegressor__min_samples_split=5, "
"ExtraTreesRegressor__n_estimators=100)"
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features_r, training_target_r)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
# make up a sample weight
training_target_r_weight = np.array(range(1, len(training_target_r)+1))
training_target_r_weight_dict = set_sample_weight(tpot_obj.fitted_pipeline_.steps, training_target_r_weight)
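    # set_sample_weight is assumed to build the fit_params dict (per-step '<step>__sample_weight' entries) used below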
np.random.seed(42)
cv_score1 = cross_val_score(tpot_obj.fitted_pipeline_, training_features_r, training_target_r, cv=3, scoring='neg_mean_squared_error')
np.random.seed(42)
cv_score2 = cross_val_score(tpot_obj.fitted_pipeline_, training_features_r, training_target_r, cv=3, scoring='neg_mean_squared_error')
np.random.seed(42)
cv_score_weight = cross_val_score(tpot_obj.fitted_pipeline_, training_features_r, training_target_r, cv=3, scoring='neg_mean_squared_error', fit_params=training_target_r_weight_dict)
np.random.seed(42)
tpot_obj.fitted_pipeline_.fit(training_features_r, training_target_r, **training_target_r_weight_dict)
# Get score from TPOT
known_score = -11.586816877933911
score = tpot_obj.score(testing_features_r, testing_target_r)
assert np.allclose(cv_score1, cv_score2)
assert not np.allclose(cv_score1, cv_score_weight)
assert np.allclose(known_score, score)
def test_fit_GroupKFold():
"""Assert that TPOT properly handles the group parameter when using GroupKFold."""
# This check tests if the darker MNIST images would generalize to the lighter ones.
means = np.mean(training_features, axis=1)
groups = means >= np.median(means)
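    # boolean mask splits the samples into a "darker" and a "lighter" group for GroupKFold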
tpot_obj = TPOTClassifier(
random_state=42,
population_size=2,
offspring_size=4,
generations=1,
verbosity=0,
config_dict='TPOT light',
cv=GroupKFold(n_splits=2),
)
tpot_obj.fit(training_features, training_target, groups=groups)
assert_greater_equal(tpot_obj.score(testing_features, testing_target), 0.97)
def test_predict():
"""Assert that the TPOT predict function raises a RuntimeError when no optimized pipeline exists."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
assert_raises(RuntimeError, tpot_obj.predict, testing_features)
def test_predict_2():
"""Assert that the TPOT predict function returns a numpy matrix of shape (num_testing_rows,)."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
pipeline_string = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5'
')'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
result = tpot_obj.predict(testing_features)
assert result.shape == (testing_features.shape[0],)
def test_predict_3():
"""Assert that the TPOT predict function works on dataset with nan"""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
pipeline_string = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5'
')'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
result = tpot_obj.predict(features_with_nan)
assert result.shape == (features_with_nan.shape[0],)
def test_predict_proba():
"""Assert that the TPOT predict_proba function returns a numpy matrix of shape (num_testing_rows, num_testing_target)."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
pipeline_string = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
result = tpot_obj.predict_proba(testing_features)
num_labels = np.amax(testing_target) + 1
assert result.shape == (testing_features.shape[0], num_labels)
def test_predict_proba_2():
"""Assert that the TPOT predict_proba function returns a numpy matrix filled with probabilities (float)."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
pipeline_string = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
result = tpot_obj.predict_proba(testing_features)
rows, columns = result.shape
for i in range(rows):
for j in range(columns):
float_range(result[i][j])
def test_predict_proba_3():
"""Assert that the TPOT predict_proba function raises a RuntimeError when no optimized pipeline exists."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
assert_raises(RuntimeError, tpot_obj.predict_proba, testing_features)
def test_predict_proba_4():
"""Assert that the TPOT predict_proba function raises a RuntimeError when the optimized pipeline do not have the predict_proba() function"""
tpot_obj = TPOTRegressor()
tpot_obj._fit_init()
pipeline_string = (
"ExtraTreesRegressor(input_matrix, "
"ExtraTreesRegressor__bootstrap=True, ExtraTreesRegressor__max_features=0.5,"
"ExtraTreesRegressor__min_samples_leaf=5, ExtraTreesRegressor__min_samples_split=5, "
"ExtraTreesRegressor__n_estimators=100)"
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features_r, training_target_r)
assert_raises(RuntimeError, tpot_obj.predict_proba, testing_features)
def test_predict_proba_5():
"""Assert that the TPOT predict_proba function works on dataset with nan."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
pipeline_string = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
tpot_obj._optimized_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
tpot_obj.fitted_pipeline_ = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)
tpot_obj.fitted_pipeline_.fit(training_features, training_target)
result = tpot_obj.predict_proba(features_with_nan)
num_labels = np.amax(training_target) + 1
assert result.shape == (features_with_nan.shape[0], num_labels)
def test_warm_start():
"""Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light',
warm_start=True)
tpot_obj.fit(pretest_X, pretest_y)
assert tpot_obj._pop is not None
assert tpot_obj._pareto_front is not None
first_pop = tpot_obj._pop
tpot_obj.random_state = 21
tpot_obj.fit(pretest_X, pretest_y)
assert tpot_obj._pop == first_pop
def test_fit():
"""Assert that the TPOT fit function provides an optimized pipeline."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0
)
tpot_obj.fit(pretest_X, pretest_y)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
def test_fit_2():
"""Assert that the TPOT fit function provides an optimized pipeline when config_dict is 'TPOT light'."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
def test_fit_3():
"""Assert that the TPOT fit function provides an optimized pipeline with subsample of 0.8."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
subsample=0.8,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
def test_fit_4():
"""Assert that the TPOT fit function provides an optimized pipeline with max_time_mins of 2 second."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=2,
generations=1,
verbosity=0,
max_time_mins=2/60.,
config_dict='TPOT light'
)
tpot_obj._fit_init()
assert tpot_obj.generations == 1000000
    # reset generations to 20 so that a failing test does not run for too long
    tpot_obj.generations = 20
tpot_obj.fit(training_features, training_target)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
def test_fit_5():
"""Assert that the TPOT fit function provides an optimized pipeline with pandas DataFrame"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0
)
tpot_obj.fit(pd_features, pd_target)
assert isinstance(pd_features, pd.DataFrame)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
def test_memory():
"""Assert that the TPOT fit function runs normally with memory=\'auto\'."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory='auto',
verbosity=0
)
tpot_obj.fit(training_features, training_target)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
assert tpot_obj.memory is not None
assert tpot_obj._memory is None
assert tpot_obj._cachedir is not None
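    # the automatically created cache directory should already be cleaned up after fit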
assert not os.path.isdir(tpot_obj._cachedir)
def test_memory_2():
"""Assert that the TPOT _setup_memory function runs normally with a valid path."""
cachedir = mkdtemp()
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory=cachedir,
verbosity=0
)
tpot_obj._setup_memory()
rmtree(cachedir)
assert tpot_obj._cachedir == cachedir
assert isinstance(tpot_obj._memory, Memory)
def test_memory_3():
"""Assert that the TPOT fit function does not clean up caching directory when memory is a valid path."""
cachedir = mkdtemp()
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory=cachedir,
verbosity=0
)
tpot_obj.fit(training_features, training_target)
assert tpot_obj._cachedir == cachedir
assert os.path.isdir(tpot_obj._cachedir)
assert isinstance(tpot_obj._memory, Memory)
# clean up
rmtree(cachedir)
tpot_obj._memory = None
def test_memory_4():
"""Assert that the TPOT _setup_memory function rasies ValueError with a invalid path."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory="./fake_temp_dir",
verbosity=0
)
assert_raises(ValueError, tpot_obj._setup_memory)
def test_memory_5():
"""Assert that the TPOT _setup_memory function runs normally with a Memory object."""
cachedir = mkdtemp()
memory = Memory(cachedir=cachedir, verbose=0)
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory=memory,
verbosity=0
)
tpot_obj._setup_memory()
rmtree(cachedir)
assert tpot_obj.memory == memory
assert tpot_obj._memory == memory
# clean up
tpot_obj._memory = None
memory = None
def test_memory_6():
"""Assert that the TPOT _setup_memory function rasies ValueError with a invalid object."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
config_dict='TPOT light',
memory=str,
verbosity=0
)
assert_raises(ValueError, tpot_obj._setup_memory)
def test_check_periodic_pipeline():
"""Assert that the _check_periodic_pipeline exports periodic pipeline."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
with closing(StringIO()) as our_file:
tpot_obj._file = our_file
tpot_obj.verbosity = 3
tpot_obj._last_pipeline_write = datetime.now()
sleep(0.11)
tpot_obj._output_best_pipeline_period_seconds = 0.1
tmpdir = mkdtemp() + '/'
tpot_obj.periodic_checkpoint_folder = tmpdir
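        # more than 0.1 s has passed since _last_pipeline_write, so this check should save a pipeline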
tpot_obj._check_periodic_pipeline(1)
our_file.seek(0)
assert_in('Saving periodic pipeline from pareto front', our_file.read())
# clean up
rmtree(tmpdir)
def test_check_periodic_pipeline_2():
"""Assert that the _check_periodic_pipeline rasie StopIteration if self._last_optimized_pareto_front_n_gens >= self.early_stop."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
tpot_obj.early_stop = 3
# will pass
tpot_obj._check_periodic_pipeline(1)
tpot_obj._last_optimized_pareto_front_n_gens = 3
assert_raises(StopIteration, tpot_obj._check_periodic_pipeline, 1)
def test_save_periodic_pipeline():
"""Assert that the _save_periodic_pipeline does not export periodic pipeline if exception happened"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
with closing(StringIO()) as our_file:
tpot_obj._file = our_file
tpot_obj.verbosity = 3
tpot_obj._last_pipeline_write = datetime.now()
sleep(0.11)
tpot_obj._output_best_pipeline_period_seconds = 0.1
tmpdir = mkdtemp() + '/'
tpot_obj.periodic_checkpoint_folder = tmpdir
        # reset _pareto_front to raise an exception
tpot_obj._pareto_front = None
tpot_obj._save_periodic_pipeline(1)
our_file.seek(0)
assert_in('Failed saving periodic pipeline, exception', our_file.read())
#clean up
rmtree(tmpdir)
def test_save_periodic_pipeline_2():
"""Assert that _save_periodic_pipeline creates the checkpoint folder and exports to it if it didn't exist"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
with closing(StringIO()) as our_file:
tpot_obj._file = our_file
tpot_obj.verbosity = 3
tpot_obj._last_pipeline_write = datetime.now()
sleep(0.11)
tpot_obj._output_best_pipeline_period_seconds = 0.1
tmpdir = mkdtemp() + '_test/'
tpot_obj.periodic_checkpoint_folder = tmpdir
tpot_obj._save_periodic_pipeline(1)
our_file.seek(0)
msg = our_file.read()
assert_in('Saving periodic pipeline from pareto front to {}'.format(tmpdir), msg)
assert_in('Created new folder to save periodic pipeline: {}'.format(tmpdir), msg)
#clean up
rmtree(tmpdir)
def test_check_periodic_pipeline_3():
"""Assert that the _save_periodic_pipeline does not export periodic pipeline if the pipeline has been saved before."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
with closing(StringIO()) as our_file:
tpot_obj._file = our_file
tpot_obj.verbosity = 3
tpot_obj._exported_pipeline_text = []
tpot_obj._last_pipeline_write = datetime.now()
sleep(0.11)
tpot_obj._output_best_pipeline_period_seconds = 0
tmpdir = mkdtemp() + '/'
tpot_obj.periodic_checkpoint_folder = tmpdir
# export once before
tpot_obj._save_periodic_pipeline(1)
tpot_obj._save_periodic_pipeline(2)
our_file.seek(0)
assert_in('Periodic pipeline was not saved, probably saved before...', our_file.read())
#clean up
rmtree(tmpdir)
def test_fit_predict():
"""Assert that the TPOT fit_predict function provides an optimized pipeline and correct output."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
result = tpot_obj.fit_predict(training_features, training_target)
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
assert not (tpot_obj._start_datetime is None)
assert result.shape == (training_features.shape[0],)
def test_update_top_pipeline():
"""Assert that the TPOT _update_top_pipeline updated an optimized pipeline."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
tpot_obj._optimized_pipeline = None
tpot_obj.fitted_pipeline_ = None
tpot_obj._update_top_pipeline()
assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)
def test_update_top_pipeline_2():
"""Assert that the TPOT _update_top_pipeline raises RuntimeError when self._pareto_front is empty."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
def pareto_eq(ind1, ind2):
return np.allclose(ind1.fitness.values, ind2.fitness.values)
tpot_obj._pareto_front = ParetoFront(similar=pareto_eq)
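    # replace the fitted pareto front with an empty one so _update_top_pipeline has nothing to pick from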
assert_raises(RuntimeError, tpot_obj._update_top_pipeline)
def test_update_top_pipeline_3():
"""Assert that the TPOT _update_top_pipeline raises RuntimeError when self._optimized_pipeline is not updated."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
tpot_obj._optimized_pipeline = None
# reset the fitness score to -float('inf')
for pipeline_scores in reversed(tpot_obj._pareto_front.keys):
pipeline_scores.wvalues = (5000., -float('inf'))
assert_raises(RuntimeError, tpot_obj._update_top_pipeline)
def test_summary_of_best_pipeline():
"""Assert that the TPOT _update_top_pipeline raises RuntimeError when self._optimized_pipeline is not updated."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
assert_raises(RuntimeError, tpot_obj._summary_of_best_pipeline, features=training_features, target=training_target)
def test_set_param_recursive():
"""Assert that _set_param_recursive sets \"random_state\" to 42 in all steps in a simple pipeline."""
pipeline_string = (
'DecisionTreeClassifier(PCA(input_matrix, PCA__iterated_power=5, PCA__svd_solver=randomized), '
'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'
)
deap_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
# assert "random_state" of PCA at step 1
assert getattr(sklearn_pipeline.steps[0][1], 'random_state') == 42
# assert "random_state" of DecisionTreeClassifier at step 2
assert getattr(sklearn_pipeline.steps[1][1], 'random_state') == 42
def test_set_param_recursive_2():
"""Assert that _set_param_recursive sets \"random_state\" to 42 in nested estimator in SelectFromModel."""
pipeline_string = (
'DecisionTreeRegressor(SelectFromModel(input_matrix, '
'SelectFromModel__ExtraTreesRegressor__max_features=0.05, SelectFromModel__ExtraTreesRegressor__n_estimators=100, '
'SelectFromModel__threshold=0.05), DecisionTreeRegressor__max_depth=8,'
'DecisionTreeRegressor__min_samples_leaf=5, DecisionTreeRegressor__min_samples_split=5)'
)
tpot_obj = TPOTRegressor()
tpot_obj._fit_init()
deap_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
assert getattr(getattr(sklearn_pipeline.steps[0][1], 'estimator'), 'random_state') == 42
assert getattr(sklearn_pipeline.steps[1][1], 'random_state') == 42
def test_set_param_recursive_3():
"""Assert that _set_param_recursive sets \"random_state\" to 42 in nested estimator in StackingEstimator in a complex pipeline."""
pipeline_string = (
'DecisionTreeClassifier(CombineDFs('
'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, DecisionTreeClassifier__min_samples_leaf=5,'
'DecisionTreeClassifier__min_samples_split=5),input_matrix) '
'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'
)
deap_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
# StackingEstimator under the transformer_list of FeatureUnion
assert getattr(getattr(sklearn_pipeline.steps[0][1].transformer_list[0][1], 'estimator'), 'random_state') == 42
assert getattr(sklearn_pipeline.steps[1][1], 'random_state') == 42
def test_evaluated_individuals_():
"""Assert that evaluated_individuals_ stores current pipelines and their CV scores."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=2,
offspring_size=4,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
assert isinstance(tpot_obj.evaluated_individuals_, dict)
for pipeline_string in sorted(tpot_obj.evaluated_individuals_.keys()):
deap_pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
operator_count = tpot_obj._operator_count(deap_pipeline)
try:
cv_scores = cross_val_score(sklearn_pipeline, training_features, training_target, cv=5, scoring='accuracy', verbose=0)
mean_cv_scores = np.mean(cv_scores)
except Exception as e:
mean_cv_scores = -float('inf')
assert np.allclose(tpot_obj.evaluated_individuals_[pipeline_string]['internal_cv_score'], mean_cv_scores)
assert np.allclose(tpot_obj.evaluated_individuals_[pipeline_string]['operator_count'], operator_count)
def test_stop_by_max_time_mins():
"""Assert that _stop_by_max_time_mins raises KeyboardInterrupt when maximum minutes have elapsed."""
tpot_obj = TPOTClassifier(config_dict='TPOT light')
tpot_obj._start_datetime = datetime.now()
sleep(0.11)
tpot_obj.max_time_mins = 0.1/60.
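    # roughly 0.11 s have elapsed, which exceeds the 0.1 s (0.1/60 min) limit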
assert_raises(KeyboardInterrupt, tpot_obj._stop_by_max_time_mins)
def test_update_evaluated_individuals_():
"""Assert that _update_evaluated_individuals_ raises ValueError when scoring function does not return a float."""
tpot_obj = TPOTClassifier(config_dict='TPOT light')
assert_raises(ValueError, tpot_obj._update_evaluated_individuals_, ['Non-Float-Score'], ['Test_Pipeline'], [1], [dict])
def test_evaluate_individuals():
"""Assert that _evaluate_individuals returns operator_counts and CV scores in correct order."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
def pareto_eq(ind1, ind2):
return np.allclose(ind1.fitness.values, ind2.fitness.values)
tpot_obj._pareto_front = ParetoFront(similar=pareto_eq)
tpot_obj._pbar = tqdm(total=1, disable=True)
pop = tpot_obj._toolbox.population(n=10)
pop = tpot_obj._evaluate_individuals(pop, training_features, training_target)
fitness_scores = [ind.fitness.values for ind in pop]
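    # each fitness is (operator_count, mean CV accuracy); both values are recomputed independently below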
for deap_pipeline, fitness_score in zip(pop, fitness_scores):
operator_count = tpot_obj._operator_count(deap_pipeline)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
try:
cv_scores = cross_val_score(sklearn_pipeline, training_features, training_target, cv=5, scoring='accuracy', verbose=0)
mean_cv_scores = np.mean(cv_scores)
except Exception as e:
mean_cv_scores = -float('inf')
assert isinstance(deap_pipeline, creator.Individual)
assert np.allclose(fitness_score[0], operator_count)
assert np.allclose(fitness_score[1], mean_cv_scores)
def test_evaluate_individuals_2():
"""Assert that _evaluate_individuals returns operator_counts and CV scores in correct order with n_jobs=2"""
tpot_obj = TPOTClassifier(
n_jobs=2,
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
def pareto_eq(ind1, ind2):
return np.allclose(ind1.fitness.values, ind2.fitness.values)
tpot_obj._pareto_front = ParetoFront(similar=pareto_eq)
tpot_obj._pbar = tqdm(total=1, disable=True)
pop = tpot_obj._toolbox.population(n=10)
pop = tpot_obj._evaluate_individuals(pop, training_features, training_target)
fitness_scores = [ind.fitness.values for ind in pop]
for deap_pipeline, fitness_score in zip(pop, fitness_scores):
operator_count = tpot_obj._operator_count(deap_pipeline)
sklearn_pipeline = tpot_obj._toolbox.compile(expr=deap_pipeline)
tpot_obj._set_param_recursive(sklearn_pipeline.steps, 'random_state', 42)
try:
cv_scores = cross_val_score(sklearn_pipeline, training_features, training_target, cv=5, scoring='accuracy', verbose=0)
mean_cv_scores = np.mean(cv_scores)
except Exception as e:
mean_cv_scores = -float('inf')
assert isinstance(deap_pipeline, creator.Individual)
assert np.allclose(fitness_score[0], operator_count)
assert np.allclose(fitness_score[1], mean_cv_scores)
def test_update_pbar():
"""Assert that _update_pbar updates self._pbar with printing correct warning message."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
# reset verbosity = 3 for checking pbar message
tpot_obj.verbosity = 3
with closing(StringIO()) as our_file:
tpot_obj._file=our_file
tpot_obj._pbar = tqdm(total=10, disable=False, file=our_file)
tpot_obj._update_pbar(pbar_num=2, pbar_msg="Test Warning Message")
our_file.seek(0)
assert_in("Test Warning Message", our_file.read())
assert_equal(tpot_obj._pbar.n, 2)
def test_update_val():
"""Assert _update_val updates result score in list and prints timeout message."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
# reset verbosity = 3 for checking pbar message
tpot_obj.verbosity = 3
with closing(StringIO()) as our_file:
tpot_obj._file=our_file
tpot_obj._pbar = tqdm(total=10, disable=False, file=our_file)
result_score_list = []
result_score_list = tpot_obj._update_val(0.9999, result_score_list)
assert_equal(result_score_list, [0.9999])
# check "Timeout"
result_score_list = tpot_obj._update_val("Timeout", result_score_list)
our_file.seek(0)
assert_in("Skipped pipeline #2 due to time out.", our_file.read())
assert_equal(result_score_list, [0.9999, -float('inf')])
def test_preprocess_individuals():
"""Assert _preprocess_individuals preprocess DEAP individuals including one evaluated individual"""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0
)
tpot_obj._fit_init()
pipeline_string_1 = (
'LogisticRegression(PolynomialFeatures'
'(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
# a normal pipeline
pipeline_string_2 = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
individuals = []
individuals.append(creator.Individual.from_string(pipeline_string_1, tpot_obj._pset))
individuals.append(creator.Individual.from_string(pipeline_string_2, tpot_obj._pset))
    # mark pipeline 2 as having been evaluated before
tpot_obj.evaluated_individuals_[pipeline_string_2] = (1, 0.99999)
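    # pipeline 2 is already in evaluated_individuals_, so only pipeline 1 should be queued for evaluation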
# reset verbosity = 3 for checking pbar message
tpot_obj.verbosity = 3
with closing(StringIO()) as our_file:
tpot_obj._file=our_file
tpot_obj._pbar = tqdm(total=2, disable=False, file=our_file)
operator_counts, eval_individuals_str, sklearn_pipeline_list, stats_dicts = \
tpot_obj._preprocess_individuals(individuals)
our_file.seek(0)
assert_in("Pipeline encountered that has previously been evaluated", our_file.read())
assert_in(pipeline_string_1, eval_individuals_str)
assert_equal(operator_counts[pipeline_string_1], 2)
assert_equal(len(sklearn_pipeline_list), 1)
def test_preprocess_individuals_2():
"""Assert _preprocess_individuals preprocess DEAP individuals with one invalid pipeline"""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0
)
tpot_obj._fit_init()
    # pipeline with two PolynomialFeatures operators
pipeline_string_1 = (
'LogisticRegression(PolynomialFeatures'
'(PolynomialFeatures(input_matrix, PolynomialFeatures__degree=2, '
'PolynomialFeatures__include_bias=False, PolynomialFeatures__interaction_only=False), '
'PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
# a normal pipeline
pipeline_string_2 = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
individuals = []
individuals.append(creator.Individual.from_string(pipeline_string_1, tpot_obj._pset))
individuals.append(creator.Individual.from_string(pipeline_string_2, tpot_obj._pset))
# reset verbosity = 3 for checking pbar message
tpot_obj.verbosity = 3
with closing(StringIO()) as our_file:
tpot_obj._file=our_file
tpot_obj._pbar = tqdm(total=3, disable=False, file=our_file)
operator_counts, eval_individuals_str, sklearn_pipeline_list, stats_dicts = \
tpot_obj._preprocess_individuals(individuals)
our_file.seek(0)
assert_in("Invalid pipeline encountered. Skipping its evaluation.", our_file.read())
assert_in(pipeline_string_2, eval_individuals_str)
assert_equal(operator_counts[pipeline_string_2], 1)
assert_equal(len(sklearn_pipeline_list), 1)
def test_preprocess_individuals_3():
"""Assert _preprocess_individuals updatas self._pbar.total when max_time_mins is not None"""
tpot_obj = TPOTClassifier(
population_size=2,
offspring_size=4,
random_state=42,
max_time_mins=5,
verbosity=0
)
tpot_obj._fit_init()
pipeline_string_1 = (
'LogisticRegression(PolynomialFeatures'
'(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
# a normal pipeline
pipeline_string_2 = (
'DecisionTreeClassifier('
'input_matrix, '
'DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8, '
'DecisionTreeClassifier__min_samples_leaf=5, '
'DecisionTreeClassifier__min_samples_split=5)'
)
individuals = []
individuals.append(creator.Individual.from_string(pipeline_string_1, tpot_obj._pset))
individuals.append(creator.Individual.from_string(pipeline_string_2, tpot_obj._pset))
# reset verbosity = 3 for checking pbar message
with closing(StringIO()) as our_file:
tpot_obj._file=our_file
tpot_obj._lambda=4
tpot_obj._pbar = tqdm(total=2, disable=False, file=our_file)
tpot_obj._pbar.n = 2
operator_counts, eval_individuals_str, sklearn_pipeline_list, stats_dicts = \
tpot_obj._preprocess_individuals(individuals)
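        # with max_time_mins set, the progress bar total should grow by _lambda (4), from 2 to 6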
assert tpot_obj._pbar.total == 6
def test_check_dataset():
"""Assert that the check_dataset function returns feature and target as expected."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
ret_features, ret_target = tpot_obj._check_dataset(training_features, training_target)
assert np.allclose(ret_features, training_features)
assert np.allclose(ret_target, training_target)
def test_check_dataset_2():
"""Assert that the check_dataset function raise ValueError when sample_weight can not be converted to float array"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
test_sample_weight = list(range(1, len(training_target)+1))
ret_features, ret_target = tpot_obj._check_dataset(training_features, training_target, test_sample_weight)
test_sample_weight[0] = 'opps'
assert_raises(ValueError, tpot_obj._check_dataset, training_features, training_target, test_sample_weight)
def test_check_dataset_3():
"""Assert that the check_dataset function raise ValueError when sample_weight has NaN"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
test_sample_weight = list(range(1, len(training_target)+1))
ret_features, ret_target = tpot_obj._check_dataset(training_features, training_target, test_sample_weight)
test_sample_weight[0] = np.nan
assert_raises(ValueError, tpot_obj._check_dataset, training_features, training_target, test_sample_weight)
def test_check_dataset_4():
"""Assert that the check_dataset function raise ValueError when sample_weight has a length different length"""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
test_sample_weight = list(range(1, len(training_target)))
assert_raises(ValueError, tpot_obj._check_dataset, training_features, training_target, test_sample_weight)
def test_check_dataset_5():
"""Assert that the check_dataset function returns feature and target as expected."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
ret_features = tpot_obj._check_dataset(training_features, target=None)
assert np.allclose(ret_features, training_features)
def test_imputer():
"""Assert that the TPOT fit function will not raise a ValueError in a dataset where NaNs are present."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(features_with_nan, training_target)
def test_imputer_2():
"""Assert that the TPOT predict function will not raise a ValueError in a dataset where NaNs are present."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
assert_equal(tpot_obj._fitted_imputer, None)
tpot_obj.predict(features_with_nan)
assert_not_equal(tpot_obj._fitted_imputer, None)
def test_imputer_3():
"""Assert that the TPOT _impute_values function returns a feature matrix with imputed NaN values."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=2,
config_dict='TPOT light'
)
tpot_obj._fit_init()
with captured_output() as (out, err):
imputed_features = tpot_obj._impute_values(features_with_nan)
assert_in("Imputing missing values in feature set", out.getvalue())
assert_not_equal(imputed_features[0][0], float('nan'))
def test_imputer_4():
"""Assert that the TPOT score function will not raise a ValueError in a dataset where NaNs are present."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj.fit(training_features, training_target)
assert_equal(tpot_obj._fitted_imputer, None)
tpot_obj.score(features_with_nan, training_target)
assert_not_equal(tpot_obj._fitted_imputer, None)
def test_sparse_matrix():
"""Assert that the TPOT fit function will raise a ValueError in a sparse matrix with config_dict='TPOT light'."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT light'
)
assert_raises(ValueError, tpot_obj.fit, sparse_features, sparse_target)
def test_sparse_matrix_2():
"""Assert that the TPOT fit function will raise a ValueError in a sparse matrix with config_dict=None."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict=None
)
assert_raises(ValueError, tpot_obj.fit, sparse_features, sparse_target)
def test_sparse_matrix_3():
"""Assert that the TPOT fit function will raise a ValueError in a sparse matrix with config_dict='TPOT MDR'."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT MDR'
)
assert_raises(ValueError, tpot_obj.fit, sparse_features, sparse_target)
def test_sparse_matrix_4():
"""Assert that the TPOT fit function will not raise a ValueError in a sparse matrix with config_dict='TPOT sparse'."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='TPOT sparse'
)
tpot_obj.fit(sparse_features, sparse_target)
def test_sparse_matrix_5():
"""Assert that the TPOT fit function will not raise a ValueError in a sparse matrix with a customized config dictionary."""
tpot_obj = TPOTClassifier(
random_state=42,
population_size=1,
offspring_size=2,
generations=1,
verbosity=0,
config_dict='tests/test_config_sparse.py'
)
tpot_obj.fit(sparse_features, sparse_target)
def test_source_decode():
"""Assert that the source_decode can decode operator source and import operator class."""
import_str, op_str, op_obj = source_decode("sklearn.linear_model.LogisticRegression")
from sklearn.linear_model import LogisticRegression
assert import_str == "sklearn.linear_model"
assert op_str == "LogisticRegression"
assert op_obj == LogisticRegression
def test_source_decode_2():
"""Assert that the source_decode return None when sourcecode is not available."""
import_str, op_str, op_obj = source_decode("sklearn.linear_model.LogisticReg")
from sklearn.linear_model import LogisticRegression
assert import_str == "sklearn.linear_model"
assert op_str == "LogisticReg"
assert op_obj is None
def test_source_decode_3():
"""Assert that the source_decode raise ImportError when sourcecode is not available and verbose=3."""
assert_raises(ImportError, source_decode, "sklearn.linear_model.LogisticReg", 3)
def test_tpot_operator_factory_class():
"""Assert that the TPOT operators class factory."""
test_config_dict = {
'sklearn.svm.LinearSVC': {
'penalty': ["l1", "l2"],
'loss': ["hinge", "squared_hinge"],
'dual': [True, False],
'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.]
},
'sklearn.linear_model.LogisticRegression': {
'penalty': ["l1", "l2"],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
'dual': [True, False]
},
'sklearn.preprocessing.Binarizer': {
'threshold': np.arange(0.0, 1.01, 0.05)
}
}
tpot_operator_list = []
tpot_argument_list = []
for key in sorted(test_config_dict.keys()):
op, args = TPOTOperatorClassFactory(key, test_config_dict[key])
tpot_operator_list.append(op)
tpot_argument_list += args
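    # 3 operators; 5 (LinearSVC) + 3 (LogisticRegression) + 1 (Binarizer) = 9 argument types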
assert len(tpot_operator_list) == 3
assert len(tpot_argument_list) == 9
assert tpot_operator_list[0].root is True
assert tpot_operator_list[1].root is False
assert tpot_operator_list[2].type() == "Classifier or Regressor"
assert tpot_argument_list[1].values == [True, False]
def test_PolynomialFeatures_exception():
"""Assert that TPOT allows only one PolynomialFeatures operator in a pipeline."""
tpot_obj._pbar = tqdm(total=1, disable=True)
def pareto_eq(ind1, ind2):
return np.allclose(ind1.fitness.values, ind2.fitness.values)
tpot_obj._pareto_front = ParetoFront(similar=pareto_eq)
# pipeline with one PolynomialFeatures operator
pipeline_string_1 = (
'LogisticRegression(PolynomialFeatures'
'(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
    # pipeline with two PolynomialFeatures operators
pipeline_string_2 = (
'LogisticRegression(PolynomialFeatures'
'(PolynomialFeatures(input_matrix, PolynomialFeatures__degree=2, '
'PolynomialFeatures__include_bias=False, PolynomialFeatures__interaction_only=False), '
'PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
# make a list for _evaluate_individuals
pipelines = []
pipelines.append(creator.Individual.from_string(pipeline_string_1, tpot_obj._pset))
pipelines.append(creator.Individual.from_string(pipeline_string_2, tpot_obj._pset))
for pipeline in pipelines:
initialize_stats_dict(pipeline)
pop = tpot_obj._evaluate_individuals(pipelines, pretest_X, pretest_y)
fitness_scores = [ind.fitness.values for ind in pop]
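    # the pipeline with two PolynomialFeatures operators is invalid, so it receives the penalty fitness (5000.0, -inf)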
known_scores = [(2, 0.94000000000000006), (5000.0, -float('inf'))]
assert np.allclose(known_scores, fitness_scores)
def test_pick_two_individuals_eligible_for_crossover():
"""Assert that pick_two_individuals_eligible_for_crossover() picks the correct pair of nodes to perform crossover with"""
ind1 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind3 = creator.Individual.from_string(
'GaussianNB(input_matrix)',
tpot_obj._pset
)
pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])
assert ((str(pick1) == str(ind1) and str(pick2) == str(ind2)) or
str(pick1) == str(ind2) and str(pick2) == str(ind1))
ind4 = creator.Individual.from_string(
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True),'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1, '
'KNeighborsClassifier__weights=uniform'
')',
tpot_obj._pset
)
    # Even though ind4 does not have the same primitive at the root, the tree shares a primitive with ind1
pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind3, ind4])
assert ((str(pick1) == str(ind1) and str(pick2) == str(ind4)) or
str(pick1) == str(ind4) and str(pick2) == str(ind1))
def test_pick_two_individuals_eligible_for_crossover_bad():
"""Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible"""
ind1 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind3 = creator.Individual.from_string(
'GaussianNB(input_matrix)',
tpot_obj._pset
)
# Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive
pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])
assert pick1 is None and pick2 is None
# You can not do crossover with a population of only 1.
pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1])
assert pick1 is None and pick2 is None
# You can not do crossover with a population of 0.
pick1, pick2 = pick_two_individuals_eligible_for_crossover([])
assert pick1 is None and pick2 is None
def test_mate_operator():
"""Assert that self._mate_operator returns offsprings as expected."""
ind1 = creator.Individual.from_string(
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=False),'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1, '
'KNeighborsClassifier__weights=uniform'
')',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True),'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=2, '
'KNeighborsClassifier__weights=uniform'
')',
tpot_obj._pset
)
# Initialize stats
initialize_stats_dict(ind1)
initialize_stats_dict(ind2)
# set as evaluated pipelines in tpot_obj.evaluated_individuals_
tpot_obj.evaluated_individuals_[str(ind1)] = (2, 0.99)
tpot_obj.evaluated_individuals_[str(ind2)] = (2, 0.99)
offspring1, offspring2 = tpot_obj._mate_operator(ind1, ind2)
expected_offspring1 = (
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=False), '
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=2, '
'KNeighborsClassifier__weights=uniform'
')'
)
expected_offspring1_alt = (
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True), '
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1, '
'KNeighborsClassifier__weights=uniform'
')'
)
assert str(offspring1) in [expected_offspring1, expected_offspring1_alt]
def test_cxOnePoint():
"""Assert that cxOnePoint() returns the correct type of node between two fixed pipelines."""
ind1 = creator.Individual.from_string(
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=False),'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1, '
'KNeighborsClassifier__weights=uniform'
')',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'KNeighborsClassifier('
'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True),'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=2, '
'KNeighborsClassifier__weights=uniform'
')',
tpot_obj._pset
)
ind1[0].ret = Output_Array
ind2[0].ret = Output_Array
ind1_copy, ind2_copy = tpot_obj._toolbox.clone(ind1),tpot_obj._toolbox.clone(ind2)
offspring1, offspring2 = cxOnePoint(ind1_copy, ind2_copy)
assert offspring1[0].ret == Output_Array
assert offspring2[0].ret == Output_Array
def test_mutNodeReplacement():
"""Assert that mutNodeReplacement() returns the correct type of mutation node in a fixed pipeline."""
pipeline_string = (
'LogisticRegression(PolynomialFeatures'
'(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
)
pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
pipeline[0].ret = Output_Array
old_ret_type_list = [node.ret for node in pipeline]
old_prims_list = [node for node in pipeline if node.arity != 0]
# test 10 times
for _ in range(10):
mut_ind = mutNodeReplacement(tpot_obj._toolbox.clone(pipeline), pset=tpot_obj._pset)
new_ret_type_list = [node.ret for node in mut_ind[0]]
new_prims_list = [node for node in mut_ind[0] if node.arity != 0]
if new_prims_list == old_prims_list: # Terminal mutated
assert new_ret_type_list == old_ret_type_list
else: # Primitive mutated
diff_prims = [x for x in new_prims_list if x not in old_prims_list]
diff_prims += [x for x in old_prims_list if x not in new_prims_list]
            if len(diff_prims) > 1: # Sometimes mutation randomly replaces an operator that is already in the pipeline
assert diff_prims[0].ret == diff_prims[1].ret
assert mut_ind[0][0].ret == Output_Array
def test_mutNodeReplacement_2():
"""Assert that mutNodeReplacement() returns the correct type of mutation node in a complex pipeline."""
tpot_obj = TPOTClassifier()
tpot_obj._fit_init()
# a pipeline with 4 operators
pipeline_string = (
"LogisticRegression("
"KNeighborsClassifier(BernoulliNB(PolynomialFeatures"
"(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, "
"PolynomialFeatures__interaction_only=False), BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=False), "
"KNeighborsClassifier__n_neighbors=10, KNeighborsClassifier__p=1, KNeighborsClassifier__weights=uniform),"
"LogisticRegression__C=10.0, LogisticRegression__dual=False, LogisticRegression__penalty=l2)"
)
pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
pipeline[0].ret = Output_Array
old_ret_type_list = [node.ret for node in pipeline]
old_prims_list = [node for node in pipeline if node.arity != 0]
# test 30 times
for _ in range(30):
mut_ind = mutNodeReplacement(tpot_obj._toolbox.clone(pipeline), pset=tpot_obj._pset)
new_ret_type_list = [node.ret for node in mut_ind[0]]
new_prims_list = [node for node in mut_ind[0] if node.arity != 0]
if new_prims_list == old_prims_list: # Terminal mutated
assert new_ret_type_list == old_ret_type_list
else: # Primitive mutated
Primitive_Count = 0
for node in mut_ind[0]:
if isinstance(node, gp.Primitive):
Primitive_Count += 1
assert Primitive_Count == 4
diff_prims = [x for x in new_prims_list if x not in old_prims_list]
diff_prims += [x for x in old_prims_list if x not in new_prims_list]
            if len(diff_prims) > 1: # Sometimes mutation randomly replaces an operator that is already in the pipeline
assert diff_prims[0].ret == diff_prims[1].ret
assert mut_ind[0][0].ret == Output_Array
def test_varOr():
"""Assert that varOr() applys crossover only and removes CV scores in offsprings."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
tpot_obj._pbar = tqdm(total=1, disable=True)
pop = tpot_obj._toolbox.population(n=5)
for ind in pop:
initialize_stats_dict(ind)
ind.fitness.values = (2, 1.0)
offspring = varOr(pop, tpot_obj._toolbox, 5, cxpb=1.0, mutpb=0.0)
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
assert len(offspring) == 5
assert len(invalid_ind) == 5
def test_varOr_2():
"""Assert that varOr() applys mutation only and removes CV scores in offsprings."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
tpot_obj._pbar = tqdm(total=1, disable=True)
pop = tpot_obj._toolbox.population(n=5)
for ind in pop:
initialize_stats_dict(ind)
ind.fitness.values = (2, 1.0)
offspring = varOr(pop, tpot_obj._toolbox, 5, cxpb=0.0, mutpb=1.0)
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
assert len(offspring) == 5
assert len(invalid_ind) == 5
def test_varOr_3():
"""Assert that varOr() applys reproduction only and does NOT remove CV scores in offsprings."""
tpot_obj = TPOTClassifier(
random_state=42,
verbosity=0,
config_dict='TPOT light'
)
tpot_obj._fit_init()
tpot_obj._pbar = tqdm(total=1, disable=True)
pop = tpot_obj._toolbox.population(n=5)
for ind in pop:
ind.fitness.values = (2, 1.0)
offspring = varOr(pop, tpot_obj._toolbox, 5, cxpb=0.0, mutpb=0.0)
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
assert len(offspring) == 5
assert len(invalid_ind) == 0
def test_operator_type():
"""Assert that TPOT operators return their type, e.g. 'Classifier', 'Preprocessor'."""
assert TPOTSelectPercentile.type() == "Preprocessor or Selector"
def test_gen():
"""Assert that TPOT's gen_grow_safe function returns a pipeline of expected structure."""
pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)
assert len(pipeline) > 1
assert pipeline[0].ret == Output_Array
def test_clean_pipeline_string():
"""Assert that clean_pipeline_string correctly returns a string without parameter prefixes"""
with_prefix = 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)'
without_prefix = 'BernoulliNB(input_matrix, alpha=1.0, fit_prior=True)'
ind1 = creator.Individual.from_string(with_prefix, tpot_obj._pset)
pretty_string = tpot_obj.clean_pipeline_string(ind1)
assert pretty_string == without_prefix
| rhiever/tpot | tests/tpot_tests.py | Python | lgpl-3.0 | 82,897 |
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from zktraffic.base.zookeeper import OpCodes
from zktraffic.stats.accumulated_stats import AccumulatedStats, StatsConfig
from zktraffic.base.sniffer import Sniffer, SnifferConfig
from .common import consume_packets
def default_zkt():
stats = AccumulatedStats(StatsConfig())
zkt = Sniffer(SnifferConfig())
zkt.add_request_handler(stats.handle_request)
return (zkt, stats)
# TODO(rgs): assert amount of bytes in writes/reads/ops
def test_packets_set_data():
zkt, stats = default_zkt()
consume_packets('set_data', zkt)
assert stats.global_stats.ops_written == 20
assert stats.global_stats.by_op_counters[OpCodes.SETDATA] == 20
# Now check that each path has the right stats...
for i in range(0, 5):
assert stats.by_path["/load-testing/%d" % (i)].ops_written == 4
assert stats.by_path["/load-testing/%d" % (i)].by_op_counters[OpCodes.SETDATA] == 4
def test_packets_create_delete():
zkt, stats = default_zkt()
consume_packets('create', zkt)
assert stats.global_stats.ops_written == 45
assert stats.global_stats.by_op_counters[OpCodes.DELETE] == 20
assert stats.global_stats.by_op_counters[OpCodes.CREATE] == 25
# Now check that each path has the right stats...
for i in range(0, 5):
assert stats.by_path["/load-testing/%d" % (i)].ops_written == 9
assert stats.by_path["/load-testing/%d" % (i)].by_op_counters[OpCodes.DELETE] == 4
assert stats.by_path["/load-testing/%d" % (i)].by_op_counters[OpCodes.CREATE] == 5
# py-zookeeper (i.e. the C library) doesn't add the request length when issuing Creates, so let's
# exercise that special parsing case
def test_create_znode_pyzookeeper():
zkt, stats = default_zkt()
consume_packets('create-pyzookeeper', zkt)
assert stats.by_path["/git/twitter-config_sha"].ops_written == 1
assert stats.by_path["/git/twitter-config_sha"].by_op_counters[OpCodes.CREATE] == 1
assert stats.by_path["/git/twitter-config_sha"].bytes_written == 60
def test_watches():
zkt, stats = default_zkt()
consume_packets('getdata_watches', zkt)
assert stats.global_stats.by_op_counters[OpCodes.GETDATA] == 2
assert stats.global_stats.by_op_counters[OpCodes.GETCHILDREN] == 2
assert stats.global_stats.watches == 2
def test_connects():
zkt, stats = default_zkt()
consume_packets('connects', zkt)
assert stats.global_stats.by_op_counters[OpCodes.CONNECT] == 3
assert stats.global_stats.by_op_counters[OpCodes.CLOSE] == 3
def test_multi():
zkt, stats = default_zkt()
consume_packets('multi', zkt)
assert stats.global_stats.by_op_counters[OpCodes.MULTI] == 1
assert stats.by_path["/foo"].ops_written == 1
| Yasumoto/zktraffic | zktraffic/tests/test_packets.py | Python | apache-2.0 | 3,523 |
# -*- coding: utf-8 -*-
from functools import wraps
from django.template import loader, TemplateSyntaxError
from django.utils.datastructures import SortedDict
from cmsplugin_text_ng.templatetags.text_ng_tags import DefineNode
from cmsplugin_text_ng.type_registry import get_type
def ensure_template_arg(func):
def _dec(template):
if isinstance(template, basestring):
template = loader.get_template(template)
return func(template)
return wraps(func)(_dec)
def _get_nodelist(tpl):
if hasattr(tpl, 'template'):
return tpl.template.nodelist
else:
return tpl.nodelist
@ensure_template_arg
def get_variables_from_template(template):
variable_nodes = [n for n in _get_nodelist(template) if isinstance(n, DefineNode)]
variables = SortedDict()
for node in variable_nodes:
if node.variable_name in variables:
raise TemplateSyntaxError('%s defined multiple times - %s' % (
node.variable_name,
_get_template_name_from_node(node)
))
try:
variables[node.variable_name] = {
'type': get_type(node.variable_type),
'optional': node.optional,
'initial_field_values': node.initial_field_values,
}
except KeyError:
raise TemplateSyntaxError('%s type not registered - %s' % (
node.variable_type,
_get_template_name_from_node(node)
))
return variables
def _get_template_name_from_node(node):
try:
return node.source[0].name
except Exception:
return ''
| 360youlun/cmsplugin-text-ng | cmsplugin_text_ng/utils.py | Python | bsd-3-clause | 1,647 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import edx_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Open edX REST APIs'
copyright = edx_theme.COPYRIGHT
author = edx_theme.AUTHOR
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.openapi',
]
# Prefix document path to section labels, otherwise autogenerated labels would look like 'heading'
# rather than 'path/to/file:heading'
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'edx_theme'
html_theme_path = [edx_theme.get_html_theme_path()]
html_theme_options = {'navigation_depth': 3}
html_favicon = os.path.join(edx_theme.get_html_theme_path(), 'edx_theme', 'static', 'css', 'favicon.ico')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'api-docsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'api-docs.tex', u'api-docs Documentation',
u'Nobody', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'api-docs', u'api-docs Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'api-docs', u'api-docs Documentation',
author, 'api-docs', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| edx-solutions/edx-platform | docs/api/conf.py | Python | agpl-3.0 | 5,561 |
"""
Functions for Report generation
"""
import numpy as np
import pandas as pd
import os
from plotly.offline import plot
import plotly.graph_objs as go
from cea.constants import HOURS_IN_YEAR
__author__ = "Gabriel Happle"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Gabriel Happle", "Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def full_report_to_xls(tsd, output_folder, basename):
""" this function is to write a full report to an ``*.xls`` file containing all intermediate and final results of a
single building thermal loads calculation"""
df = pd.DataFrame(tsd)
# Create a Pandas Excel writer using XlsxWriter as the engine.
#timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
#output_path = os.path.join(output_folder,"%(basename)s-%(timestamp)s.xls" % locals())
output_path = os.path.join(output_folder, "%(basename)s.xls" % locals())
writer = pd.ExcelWriter(output_path, engine='xlwt')
df.to_excel(writer, na_rep='NaN')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
writer.close()
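# Illustrative usage sketch (not part of the original module; the tsd keys below
# are assumptions - a real run passes the full time-series dict produced by the
# demand calculation):
#
#   tsd = {'T_int': np.zeros(HOURS_IN_YEAR), 'Qhs_sen_sys': np.zeros(HOURS_IN_YEAR)}
#   full_report_to_xls(tsd, output_folder='/tmp/reports', basename='B001')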
def quick_visualization_tsd(tsd, output_folder, basename):
# import keys
from cea.demand.thermal_loads import TSD_KEYS_HEATING_LOADS, TSD_KEYS_HEATING_TEMP, TSD_KEYS_RC_TEMP, \
TSD_KEYS_COOLING_LOADS, TSD_KEYS_MOISTURE, TSD_KEYS_VENTILATION_FLOWS, TSD_KEYS_COOLING_SUPPLY_TEMP, \
TSD_KEYS_COOLING_SUPPLY_FLOWS
# set to True to produce plotly graphs of selected variables
plot_heat_load = True
plot_heat_temp = True
plot_cool_load = True
plot_cool_moisture = True
plot_cool_air = True
plot_cool_sup = True
auto_open = False
if plot_heat_load:
filename = os.path.join(output_folder, "heat-load-{}.html").format(basename)
traces = []
for key in TSD_KEYS_HEATING_LOADS:
y = tsd[key][50:150]
trace = go.Scattergl(x=np.linspace(1, 100, 100), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open)
if plot_heat_temp:
filename = os.path.join(output_folder, "heat-temp-{}.html").format(basename)
traces = []
keys = []
keys.extend(TSD_KEYS_HEATING_TEMP)
keys.extend(TSD_KEYS_RC_TEMP)
for key in keys:
y = tsd[key][50:150]
trace = go.Scattergl(x=np.linspace(1, 100, 100), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open)
if plot_cool_load:
filename = os.path.join(output_folder, "cool-load-{}.html").format(basename)
traces = []
for key in TSD_KEYS_COOLING_LOADS:
y = tsd[key]
trace = go.Scattergl(x=np.linspace(1, HOURS_IN_YEAR, HOURS_IN_YEAR), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open)
if plot_cool_moisture:
filename = os.path.join(output_folder, "cool-moisture-{}.html").format(basename)
traces = []
for key in TSD_KEYS_MOISTURE:
y = tsd[key]
trace = go.Scattergl(x=np.linspace(1, HOURS_IN_YEAR, HOURS_IN_YEAR), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open)
if plot_cool_air:
filename = os.path.join(output_folder, "cool-air-{}.html").format(basename)
traces = []
for key in TSD_KEYS_VENTILATION_FLOWS:
y = tsd[key]
trace = go.Scattergl(x=np.linspace(1, HOURS_IN_YEAR, HOURS_IN_YEAR), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open)
if plot_cool_sup:
filename = os.path.join(output_folder, "cool-sup-{}.html").format(basename)
traces = []
keys = []
keys.extend(TSD_KEYS_COOLING_SUPPLY_TEMP)
keys.extend(TSD_KEYS_COOLING_SUPPLY_FLOWS)
for key in keys:
y = tsd[key]
trace = go.Scattergl(x=np.linspace(1, HOURS_IN_YEAR, HOURS_IN_YEAR), y=y, name=key, mode='lines+markers')
traces.append(trace)
fig = go.Figure(data=traces)
plot(fig, filename=filename, auto_open=auto_open) | architecture-building-systems/CEAforArcGIS | cea/utilities/reporting.py | Python | mit | 4,645 |
import os
import time
from slackclient import SlackClient
import google_places as gp
import google_maps as gm
import api_ai
import pprint
#from environ variable
BOT_ID = os.environ.get("ROUTE_PLANNER_ID")
#constants
AT_BOT = "<@" + BOT_ID + ">"
BOT_SESSION_ID = "route_planner_bot"
#instantiate Slack client
slack_client = SlackClient(os.environ.get('ROUTE_PLANNER_TOKEN'))
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = "I'm not sure what you mean. Can you please repeat that?\n"
#if command.startswith(CLOSEST_ROUTE):
apiai_query = command
print command
apiai_resp = api_ai.query_apiai(apiai_query, BOT_SESSION_ID)
pprint.pprint(apiai_resp)
response = unicode(apiai_resp['result']['fulfillment']['speech'])
if apiai_resp['result']['metadata']['intentName'] == 'route_plan':
if apiai_resp['result']['actionIncomplete'] == False:
data = api_ai.parse_result(apiai_resp)
pprint.pprint(data)
#slack_client.api_call("chat.postMessage", channel=channel, text="activating gmaps", as_user=True)
print "activating google maps\n"
#response = "Starting address: {0}\n Ending address: {1}\n".format(data['start_addr'], data['end_addr'])
start_addr = data['start_addr']
end_addr = data['end_addr']
inter_addrs = list()
if 'inter_places' in data:
for stop in data['inter_places']:
inter_result = gp.get_gplaces_results(stop['place'], stop['city'], stop['state'])
if inter_result != None:
inter_addrs.append(inter_result['address'])
print inter_result['address']
inter_response = "The address of {0} in {1}, {2} is {3}\n".format(stop['place'], stop['city'], stop['state'], inter_result['address'])
slack_client.api_call("chat.postMessage", channel=channel, text=inter_response, as_user=True)
else:
print "There is no {0} in {1}, {2}\n".format(stop['place'], stop['city'], stop['state'])
route, cost = gm.get_path(start_addr, inter_addrs, end_addr)
addr_path_list = [start_addr]
addr_path_list.extend(inter_addrs)
addr_path_list.append(end_addr)
store_route = list()
for i in range(len(route)):
store_route.append(addr_path_list[route[i]])
response = "Here's a way to get to all places:\n"
for i in range(len(cost)):
response = response + "Go from {0} to {1}. Takes about {2} minutes\n".format(store_route[i], store_route[i+1], round(cost[i]))
slack_client.api_call("chat.postMessage", channel=channel, text=response, as_user=True)
return 1
elif apiai_resp['result']['metadata']['intentName'] == 'find_place':
if apiai_resp['result']['actionIncomplete'] == False:
data = api_ai.parse_result(apiai_resp)
#pprint.pprint(data)
gplaces_result = gp.get_gplaces_results(data['place'], data['city'], data['state'])
if gplaces_result == None:
response = "There's no {0} in {1},{2}".forma(data['place'], data['city'], data['state'])
else:
pprint.pprint(gplaces_result)
response = "Address: {0}".format(gplaces_result['address'])
else:
print "Intent not implemented\n"
slack_client.api_call("chat.postMessage", channel=channel, text=response, as_user=True)
return 0
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
#return text after the @mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1
if slack_client.rtm_connect():
print "route_planner bot connected and running!\n"
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
print command
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print "Connection failed. Invalid Slack token or Bot ID?\n"
| mingtaiha/n.ai | slackbots/route_planner/route_planner.py | Python | mit | 4,949 |
from jinja2.ext import babel_extract as extract_jinja2
import jinja_extensions as je
jinja_extensions = '''
jinja2.ext.do, jinja2.ext.with_,
climmob3.jinja_extensions.SnippetExtension
'''
# This function takes badly formatted HTML with strings etc. and makes it beautiful.
# It generally removes surplus whitespace and kills \n; this will break <code><pre>
# tags, but those should not be being translated.
def jinja2_cleaner(fileobj, *args, **kw):
    # Tidy badly formatted HTML with strings etc. before it goes to the translation .pot file
# This code is based on CKAN code which is licensed as follows
#
# CKAN - Data Catalogue Software
# Copyright (C) 2007 Open Knowledge Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
kw['options']['extensions'] = jinja_extensions
raw_extract = extract_jinja2(fileobj, *args, **kw)
for lineno, func, message, finder in raw_extract:
if isinstance(message, basestring):
message = je.regularise_html(message)
elif message is not None:
message = (je.regularise_html(message[0])
,je.regularise_html(message[1]))
yield lineno, func, message, finder
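# Illustrative note (not from the original source): an extractor like
# extract_climmob3 below is typically registered with Babel through an entry
# point and referenced from a mapping file, roughly like this (method name,
# paths and patterns are assumptions):
#
#   # setup.py
#   entry_points="""
#   [babel.extractors]
#   climmob3 = climmob3.extract:extract_climmob3
#   """
#
#   # babel.cfg
#   [climmob3: climmob3/templates/**.jinja2]
#   encoding = utf-8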
# This custom extractor supports custom tags in the Babel Jinja2 extraction. Without it, the normal extraction fails
def extract_climmob3(fileobj, *args, **kw):
# This code is based on CKAN code which is licensed as follows
#
# CKAN - Data Catalogue Software
# Copyright (C) 2007 Open Knowledge Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
fileobj.read()
output = jinja2_cleaner(fileobj, *args, **kw)
fileobj.seek(0)
return output | BioversityCostaRica/CLIMMOBNET_V3 | climmob3/extract.py | Python | gpl-3.0 | 3,010 |
"""
Django settings for demo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j^kukd0jyjceva)_l&lm9_wvmq%+wyj#9)!wn(xagzl^di5c5b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'demo.urls'
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| WimpyAnalytics/pynt-of-django | demo/demo/settings.py | Python | bsd-3-clause | 2,036 |
import regex
import requests
from pympler import summary, muppy
import psutil
from pynab.db import db_session, Regex, Blacklist, engine
from pynab import log
import config
import db.regex as regex_data
class Match(object):
"""Holds a regex match result so we can use it in chained if statements."""
def __init__(self):
self.match_obj = None
def match(self, *args, **kwds):
self.match_obj = regex.search(*args, **kwds)
return self.match_obj is not None
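# Illustrative usage (not part of the original module): Match keeps the last
# result around so regex checks can be chained in if/elif blocks, e.g.:
#
#   m = Match()
#   name = 'Some.Show.S01E02.720p-GRP'  # hypothetical release name
#   if m.match(r'[sS](\d+)[eE](\d+)', name):
#       season, episode = m.match_obj.group(1), m.match_obj.group(2)
#   elif m.match(r'(\d{4})[.-](\d{2})[.-](\d{2})', name):
#       air_date = m.match_obj.group(0)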
def update_blacklist():
"""Check for Blacklist update and load them into db."""
blacklist_url = config.postprocess.get('blacklist_url')
if blacklist_url:
response = requests.get(blacklist_url)
lines = response.text.splitlines()
blacklists = []
for line in lines:
elements = line.split('\t\t')
if len(elements) == 4:
blacklists.append({
'group_name': elements[0],
'regex': elements[1],
'description': elements[3],
'status': False
})
engine.execute(Blacklist.__table__.insert(), blacklists)
return True
else:
log.error('No blacklist update url in config.')
return False
def update_regex():
"""Check for NN+ regex update and load them into db."""
with db_session() as db:
regex_url = config.postprocess.get('regex_url')
if regex_url:
response = requests.get(regex_url)
lines = response.text.splitlines()
# get the revision by itself
first_line = lines.pop(0)
revision = regex.search('\$Rev: (\d+) \$', first_line)
if revision:
revision = int(revision.group(1))
log.info('Regex at revision: {:d}'.format(revision))
# and parse the rest of the lines, since they're an sql dump
regexes = {}
for line in lines:
reg = regex.search('\((\d+), \'(.*)\', \'(.*)\', (\d+), (\d+), (.*), (.*)\);$', line)
if reg:
try:
if reg.group(6) == 'NULL':
description = ''
else:
description = reg.group(6).replace('\'', '')
regexes[int(reg.group(1))] = {
'id': int(reg.group(1)),
'group_name': reg.group(2),
'regex': reg.group(3).replace('\\\\', '\\'),
'ordinal': int(reg.group(4)),
'status': bool(reg.group(5)),
'description': description
}
except:
log.error('Problem importing regex dump.')
return False
# if the parsing actually worked
if len(regexes) > 0:
curr_total = db.query(Regex).filter(Regex.id <= 100000).count()
change = len(regexes) - curr_total
# this will show a negative if we add our own, but who cares for the moment
log.info('Retrieved {:d} regexes, {:d} new.'.format(len(regexes), change))
ids = []
regexes = modify_regex(regexes)
for reg in regexes.values():
r = Regex(**reg)
ids.append(r.id)
db.merge(r)
log.info('Added/modified {:d} regexes from Newznab\'s collection'.format(len(regexes)))
removed = db.query(Regex).filter(~Regex.id.in_(ids)).filter(Regex.id <= 100000).update(
{Regex.status: False}, synchronize_session='fetch')
log.info('Disabled {:d} removed regexes.'.format(removed))
# add pynab regex
for reg in regex_data.additions:
r = Regex(**reg)
db.merge(r)
log.info('Added/modified {:d} Pynab regexes.'.format(len(regex_data.additions)))
db.commit()
return True
else:
log.error('No config item set for regex_url - do you own newznab plus?')
return False
def modify_regex(regexes):
for key, replacement in regex_data.replacements.items():
regexes[key] = replacement
return regexes
# both from: http://www.mobify.com/blog/sqlalchemy-memory-magic/
def get_virtual_memory_usage_kb():
"""The process's current virtual memory size in Kb, as a float."""
return float(psutil.Process().memory_info()[1]) / 1024.0
def memory_usage(where):
"""Print out a basic summary of memory usage."""
mem_summary = summary.summarize(muppy.get_objects())
log.debug("Memory summary: {}".format(where))
summary.print_(mem_summary, limit=2)
log.debug("VM: {:2f}Mb".format(get_virtual_memory_usage_kb() / 1024.0))
def smart_truncate(content, length, suffix=''):
return content if len(content) <= length else content[:length-len(suffix)].rsplit(' ', 1)[0] + suffix | Herkemer/pynab | pynab/util.py | Python | gpl-2.0 | 5,121 |
__version__ = '1.1.1'
INFO_URL = 'https://github.com/ikreymer/webarchiveplayer'
| machawk1/webarchiveplayer | archiveplayer/version.py | Python | gpl-3.0 | 79 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo_config import cfg
from openstack.common import eventlet_backdoor
from openstack.common.gettextutils import _LE, _LI, _LW
from openstack.common import importutils
from openstack.common import log as logging
from openstack.common import systemd
from openstack.common import threadgroup
rpc = importutils.try_import('monasca.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_LE('Exception during rpc cleanup.'))
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
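# Illustrative usage (the Service subclass and CONF option are assumptions):
#
#   launcher = launch(MyService(), workers=CONF.workers)
#   launcher.wait()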
| biirdy/monasca-anomaly | openstack/common/service.py | Python | apache-2.0 | 15,575 |
############# ignore this bit for now #############
from microbit import *
def get_message(times_pressed):
############# edit below #############
# times_pressed is a variable that holds an integer telling you how often
# button A was pressed. Your task is to create a message that is going to
# be displayed on the microbit
message = 'My message'
############# edit above #############
return message
while True:
presses = button_a.get_presses()
display.scroll(get_message(presses)) | mathisgerdes/microbit-macau | Session 1 - Programing with Python/python-intro/2_logics.py | Python | mit | 536 |
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2018 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from __future__ import division
from builtins import str
from resources.lib.py_utils import old_div
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib.kodi_utils import get_kodi_version
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
from kodi_six import xbmc
from kodi_six import xbmcgui
# TO DO
URL_ROOT = 'https://www.questod.co.uk'
URL_SHOWS = URL_ROOT + '/api/shows/%s'
# mode
URL_SHOWS_AZ = URL_ROOT + '/api/shows%s'
# mode
URL_VIDEOS = URL_ROOT + '/api/show-detail/%s'
# showId
URL_STREAM = URL_ROOT + '/api/video-playback/%s'
# path
URL_LIVE = 'https://www.questod.co.uk/channel/%s'
URL_LICENCE_KEY = 'https://lic.caas.conax.com/nep/wv/license|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&PreAuthorization=%s&Host=lic.caas.conax.com|R{SSM}|'
# videoId
CATEGORIES_MODE = {
'FEATURED': 'featured',
'MOST POPULAR': 'most-popular',
'NEW': 'new',
'LEAVING SOON': 'last-chance'
}
CATEGORIES_MODE_AZ = {'A-Z': '-az'}
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
    - All programmes
    - Series
    - News
- ...
"""
for category_name_title, category_name_value in list(CATEGORIES_MODE.items(
)):
item = Listitem()
item.label = category_name_title
item.set_callback(list_programs_mode,
item_id=item_id,
category_name_value=category_name_value)
item_post_treatment(item)
yield item
for category_name_title, category_name_value in list(CATEGORIES_MODE_AZ.items(
)):
item = Listitem()
item.label = category_name_title
item.set_callback(list_programs_mode_az,
item_id=item_id,
category_name_value=category_name_value)
item_post_treatment(item)
yield item
@Route.register
def list_programs_mode(plugin, item_id, category_name_value, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(URL_SHOWS % category_name_value)
json_parser = json.loads(resp.text)
for program_datas in json_parser["items"]:
program_title = program_datas["title"]
program_id = program_datas["id"]
program_image = ''
if 'image' in program_datas:
program_image = program_datas["image"]["src"]
item = Listitem()
item.label = program_title
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(list_program_seasons,
item_id=item_id,
program_id=program_id)
item_post_treatment(item)
yield item
@Route.register
def list_programs_mode_az(plugin, item_id, category_name_value, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(URL_SHOWS_AZ % category_name_value)
json_parser = json.loads(resp.text)
for program_datas_letter in json_parser["items"]:
for program_datas in program_datas_letter["items"]:
program_title = program_datas["title"]
program_id = program_datas["id"]
item = Listitem()
item.label = program_title
item.set_callback(list_program_seasons,
item_id=item_id,
program_id=program_id)
item_post_treatment(item)
yield item
@Route.register
def list_program_seasons(plugin, item_id, program_id, **kwargs):
"""
    Build program seasons listing
- Season 1
- ...
"""
resp = urlquick.get(URL_VIDEOS % program_id)
json_parser = json.loads(resp.text)
for program_season_datas in json_parser["show"]["seasonNumbers"]:
program_season_name = 'Season - ' + str(program_season_datas)
program_season_number = program_season_datas
item = Listitem()
item.label = program_season_name
item.set_callback(list_videos,
item_id=item_id,
program_id=program_id,
program_season_number=program_season_number)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, program_id, program_season_number, **kwargs):
resp = urlquick.get(URL_VIDEOS % program_id)
json_parser = json.loads(resp.text)
at_least_one_item = False
if 'episode' in json_parser["videos"]:
if str(program_season_number) in json_parser["videos"]["episode"]:
for video_datas in json_parser["videos"]["episode"][str(
program_season_number)]:
at_least_one_item = True
video_title = video_datas["title"]
video_duration = old_div(int(video_datas["videoDuration"]), 1000)
video_plot = video_datas["description"]
video_image = video_datas["image"]["src"]
video_id = video_datas["path"]
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.art['fanart'] = video_image
item.info["plot"] = video_plot
item.info["duration"] = video_duration
item.set_callback(get_video_url,
item_id=item_id,
video_id=video_id)
item_post_treatment(item,
is_playable=True,
is_downloadable=True)
yield item
if not at_least_one_item:
plugin.notify(plugin.localize(30718), '')
yield False
@Resolver.register
def get_video_url(plugin,
item_id,
video_id,
download_mode=False,
**kwargs):
resp = urlquick.get(URL_STREAM % video_id, max_age=-1)
json_parser = json.loads(resp.text)
if 'error' in json_parser:
if json_parser["error"] is not None:
if json_parser["error"]["status"] == '403':
plugin.notify('ERROR', plugin.localize(30713))
else:
plugin.notify('ERROR', plugin.localize(30716))
return False
if 'drmToken' in json_parser["playback"]:
if get_kodi_version() < 18:
xbmcgui.Dialog().ok('Info', plugin.localize(30602))
return False
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
if download_mode:
xbmcgui.Dialog().ok('Info', plugin.localize(30603))
return False
token = json_parser["playback"]["drmToken"]
item = Listitem()
item.path = json_parser["playback"]["streamUrlDash"]
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
item.property['inputstreamaddon'] = 'inputstream.adaptive'
item.property['inputstream.adaptive.manifest_type'] = 'mpd'
item.property[
'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
item.property[
'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token
return item
else:
final_video_url = json_parser["playback"]["streamUrlHls"]
if download_mode:
return download.download_video(final_video_url)
return final_video_url
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
if get_kodi_version() < 18:
xbmcgui.Dialog().ok('Info', plugin.localize(30602))
return False
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
if item_id == 'questtv':
resp = urlquick.get(URL_LIVE % 'quest', max_age=-1)
elif item_id == 'questred':
resp = urlquick.get(URL_LIVE % 'quest-red', max_age=-1)
if len(re.compile(r'drmToken\"\:\"(.*?)\"').findall(resp.text)) > 0:
token = re.compile(r'drmToken\"\:\"(.*?)\"').findall(resp.text)[0]
if len(re.compile(r'streamUrlDash\"\:\"(.*?)\"').findall(
resp.text)) > 0:
live_url = re.compile(r'streamUrlDash\"\:\"(.*?)\"').findall(
resp.text)[0]
item = Listitem()
item.path = live_url
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
item.property['inputstreamaddon'] = 'inputstream.adaptive'
item.property['inputstream.adaptive.manifest_type'] = 'mpd'
item.property[
'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
item.property[
'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token
return item
plugin.notify('ERROR', plugin.localize(30713))
return False
| SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/uk/questod.py | Python | gpl-2.0 | 10,514 |
import os
from flask import Flask, Response, request, redirect, url_for
from werkzeug import secure_filename
import urllib2
import boto
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
import json
from tempfile import mkdtemp
from subprocess import Popen, PIPE
app = Flask(__name__)
@app.route("/")
def index():
return """
Available API endpoints:
GET /queues List all queues
POST /queues Create a new queue
DELETE /queues/<id> Delete a specific queue
    GET /queues/<qid>/msgs        Get a message, return it to the user
    GET /queues/<qid>/msgs/count  Return the number of messages in a queue
POST /queues/<qid>/msgs Write a new message to a queue
DELETE /queues/<qid>/msgs Get and delete a message from the queue
"""
@app.route("/version", methods=['GET'])
def version():
"""
print boto version
curl -s -X GET localhost:5000/version
"""
print("Boto version: "+boto.Version+ "\n")
return "Boto version: "+boto.Version+ "\n"
@app.route("/queues", methods=['GET'])
def queues_index():
"""
List all queues
curl -s -X GET -H 'Accept: application /json' http://localhost:5000/queues | python -mjson.tool
curl -s -X GET -H 'Accept: application /json' 83.212.126.224/queues | python -mjson.tool
"""
all = []
conn = get_conn()
for q in conn.get_all_queues():
all.append(q.name)
resp = json.dumps(all)
return Response(response=resp, mimetype="application/json")
@app.route("/queues", methods=['POST'])
def queues_create():
"""
Create queue
curl -X POST -H 'Content-Type: application/json' http://localhost:5000/queues -d '{"name": "DamosQ"}'
curl -X POST -H 'Content-Type: application/json' 83.212.126.224:8080/queues -d '{"name": "DamosQ"}'
"""
conn = get_conn()
body = request.get_json(force=True)
name = body['name']
queue = conn.create_queue(name, 120)
resp = "Queue "+name+" has been created\n"
return Response(response=resp, mimetype="application/json")
@app.route("/queues/<name>", methods=['DELETE'])
def queues_remove(name):
"""
Delete queue
curl -X DELETE -H 'Accept: application/json' http://localhost:5000/queues/DamosQ
curl -X DELETE -H 'Accept: application/json' 83.212.126.224:8080/queues/DamosQ
"""
conn = get_conn()
queue = conn.get_queue(name)
conn.delete_queue(queue)
resp = "Queue "+name+" has been removed\n"
return Response(response=resp, mimetype="application/json")
@app.route("/queues/<name>/msgs/count", methods=['GET'])
def messages_count(name):
"""
Get message count for queue
curl -X GET -H 'Accept: application/json' http://localhost:5000/queues/DamosQ/msgs/count
curl -X GET -H 'Accept: application/json' 83.212.126.224:8080/queues/DamosQ/msgs/count
"""
conn = get_conn()
queue = conn.get_queue(name)
count = queue.count()
resp = "Queue "+name+" has "+str(count)+" messages\n"
return Response(response=resp, mimetype="application/json")
@app.route("/queues/<name>/msgs", methods=['POST'])
def messages_write(name):
"""
    Write a message to the queue
curl -s -X POST -H 'Accept: application/json' http://localhost:5000/queues/DamosQ/msgs -d '{"content": "this is the queue message"}'
curl -s -X POST -H 'Accept: application/json' 83.212.126.224:8080/queues/DamosQ/msgs -d '{"content": "this is the queue message"}'
"""
body = request.get_json(force=True)
messageText = body['content']
conn = get_conn()
queue = conn.get_queue(name)
queue.set_message_class(Message)
m = Message()
m.set_body(messageText)
queue.write(m)
resp = "Message "+messageText+" has been written to queue "+name+"\n"
return Response(response=resp, mimetype="application/json")
@app.route("/queues/<name>/msgs", methods=['GET'])
def messages_read(name):
"""
Get message from queue
curl -X GET -H 'Accept: application/json' http://localhost:5000/queues/DamosQ/msgs
curl -X GET -H 'Accept: application/json' 83.212.126.224:8080/queues/DamosQ/msgs
"""
conn = get_conn()
queue = conn.get_queue(name)
messages = queue.get_messages()
if len(messages) > 0:
message = messages[0]
resp = "Queue: "+name+". \nMessage: "+ message.get_body()+"\n"
else:
resp = "No messages for queue "+name+"\n"
return Response(response=resp, mimetype="application/json")
@app.route("/queues/<name>/msgs", methods=['DELETE'])
def messages_consume(name):
"""
Consume message from queue
curl -X DELETE -H 'Accept: application/json' http://localhost:5000/queues/DamosQ/msgs
curl -X DELETE -H 'Accept: application/json' 83.212.126.224:8080/queues/DamosQ/msgs
"""
conn = get_conn()
queue = conn.get_queue(name)
messages = queue.get_messages()
if len(messages) > 0:
message = messages[0]
resp = "Queue: "+name+" \nDeleted message: "+ message.get_body()+" \n"
queue.delete_message(message)
else:
resp = "No messages for queue "+name+"\n"
return Response(response=resp, mimetype="application/json")
def get_conn():
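    # Fetch the AWS access key id and secret key from the helper endpoint on
    # EC2, then open an SQS connection in the eu-west-1 region.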
key_id, secret_access_key = urllib2.urlopen("http://ec2-52-30-7-5.eu-west-1.compute.amazonaws.com:81/key").read().split(':')
return boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=key_id ,aws_secret_access_key=secret_access_key)
if __name__ == "__main__":
app.run(host="0.0.0.0",port=5000, debug=True)
| dasheq/lastlab | my_application/server.py | Python | mit | 5,246 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
import pickle
import codecs
class AttrDict(dict):
"""
Dictionary whose elements can be accessed like attributes.
>>> d = AttrDict(x=1, y=2)
>>> d.x = 2
d => {'x': 2, 'y': 2}
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
# Pure magic
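        # Binding __dict__ to the dict itself makes attribute access and item
        # access share the same storage, so d.x and d['x'] stay in sync.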
self.__dict__ = self
def setdefault(self, *args, **kwargs):
"""
Set default values for the given keys.
The entries whose key is already present in the dictionary are not modified.
Modify the dictionary in-place, and return a reference to self.
        >>> d = AttrDict(x=1, y=2)
        >>> d.setdefault(y=3, z=3)
        d => {'x': 1, 'y': 2, 'z': 3}
        'z' does not exist and is inserted,
        'y' already exists, it is not modified.
        This method is still compatible with dict.setdefault:
        >>> d = AttrDict(x=1, y=2)
        >>> d.setdefault('z', 4)
        4
        >>> d.setdefault('y', 3)
        2
"""
if args:
# For retro-compatibility with dict
return super(AttrDict, self).setdefault(*args)
for k, v in kwargs.items():
super(AttrDict, self).setdefault(k, v)
return self
def uopen(filename, mode='r', encoding='utf-8'):
return codecs.open(filename, mode, encoding)
def uprint(unicode_text):
print unicode_text.encode('utf-8')
def pickle_dump(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def pickle_load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def edit_distance(s1, s2):
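    """
    Compute the Levenshtein (edit) distance between s1 and s2 using a
    memory-efficient, single-row dynamic programming table.

    Illustrative example: edit_distance('kitten', 'sitting') == 3
    (two substitutions and one insertion).
    """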
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = list(range(len(s1) + 1))
for j, y in enumerate(s2):
new_distances = [j + 1]
for i, x in enumerate(s1):
if x == y:
new_distances.append(distances[i])
else:
new_distances.append(1 + min((distances[i],
distances[i + 1],
new_distances[-1])))
distances = new_distances
return distances[-1]
| eske/RLPE | utils.py | Python | apache-2.0 | 2,250 |
#encoding:utf-8
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls.defaults import *
urlpatterns = patterns('pm.views',
(r'^$','inbox'),
(r'^inbox/$','inbox'),
(r'^inbox/p(\d{1,10})/$','inbox'),
(r'^outbox/$','outbox'),
(r'^outbox/p(\d{1,10})/$','outbox'),
(r'^write/$','write'),
    (r'^delete/$','delete'),
(r'^(\d{1,10})/','detail'),
) | wangjun/pythoner.net | pythoner/pm/urls.py | Python | gpl-3.0 | 1,035 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
LayerBoardDialog
A QGIS plugin
 This plugin displays a table with all the project layers and lets the user change some properties directly. It also aims to be a board showing useful information on all layers, and to export this information as CSV or PDF
-------------------
begin : 2015-05-21
git sha : $Format:%H$
copyright : (C) 2015 by Michaël DOUCHIN / 3liz
email : info@3liz.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QDialog
from .qgis_plugin_tools.resources import load_ui
FORM_CLASS = load_ui('layer_board_dialog_base.ui')
class LayerBoardDialog(QDialog, FORM_CLASS):
def __init__(self, parent=None):
"""Constructor."""
super(LayerBoardDialog, self).__init__(parent)
self.setupUi(self)
| 3liz/QgisLayerBoardPlugin | layer_board_dialog.py | Python | gpl-2.0 | 1,673 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import cirq
import cirq_google
class OtherX(cirq.SingleQubitGate):
def _unitary_(self) -> np.ndarray:
return np.array([[0, 1], [1, 0]])
def _decompose_(self, qubits):
# Coverage explicitly ignored since we are checking that we don't
# run this line and fall into an infinite loop.
return OtherOtherX().on(*qubits) # coverage:ignore
class OtherOtherX(cirq.SingleQubitGate):
def _decompose_(self, qubits):
return OtherX().on(*qubits)
class NonNativeGate(cirq.SingleQubitGate):
pass
def test_avoids_infinite_cycle_when_matrix_available():
q = cirq.GridQubit(0, 0)
c = cirq.Circuit(OtherX().on(q), OtherOtherX().on(q))
cirq_google.ConvertToXmonGates().optimize_circuit(c)
cirq.testing.assert_has_diagram(c, '(0, 0): ───PhX(1)───PhX(1)───')
cirq.protocols.decompose(c)
q = cirq.GridQubit.rect(1, 3)
matrix_gate = cirq.MatrixGate(cirq.testing.random_unitary(2))
def test_bad_operation():
c = cirq.Circuit(NonNativeGate().on(q[0]))
with pytest.raises(TypeError):
cirq_google.ConvertToXmonGates().optimize_circuit(c)
@pytest.mark.parametrize(
'op, is_valid',
[
(cirq.CircuitOperation(cirq.FrozenCircuit(matrix_gate(q[0]))), False),
(matrix_gate(q[0]), True),
(matrix_gate(q[0]).with_tags('test_tags'), True),
(matrix_gate(q[0]).controlled_by(q[1]), True),
(matrix_gate(q[0]).controlled_by(q[1]).with_tags('test_tags'), True),
(matrix_gate(q[0]).with_tags('test_tags').controlled_by(q[1]), True),
],
)
def test_supported_operation(op, is_valid):
c = cirq.Circuit(op)
assert (cirq_google.ConvertToXmonGates().optimization_at(c, 0, op) is not None) == is_valid
| quantumlib/Cirq | cirq-google/cirq_google/optimizers/convert_to_xmon_gates_test.py | Python | apache-2.0 | 2,362 |
from panoramisk import utils
from panoramisk.exceptions import AGIException
def test_parse_agi_valid_result():
res = try_parse_agi_result('200 result=0')
assert res == {'msg': '', 'result': ('0', ''), 'status_code': 200}
res = try_parse_agi_result('200 result=1')
assert res == {'msg': '', 'result': ('1', ''), 'status_code': 200}
res = try_parse_agi_result('200 result=1234')
assert res == {'msg': '', 'result': ('1234', ''), 'status_code': 200}
res = try_parse_agi_result('200 result= (timeout)')
assert res == {'msg': '', 'result': ('', 'timeout'), 'status_code': 200}
def test_parse_agi_invalid_result():
res = try_parse_agi_result('510 Invalid or unknown command')
assert res == {'msg': '510 Invalid or unknown command',
'error': 'AGIInvalidCommand',
'status_code': 510}
res = try_parse_agi_result('520 Use this')
assert res == {'msg': '520 Use this',
'error': 'AGIUsageError',
'status_code': 520}
def try_parse_agi_result(result):
try:
res = utils.parse_agi_result(result)
except AGIException as err:
res = err.items
res['error'] = err.__class__.__name__
res['msg'] = err.args[0]
return res
| gawel/panoramisk | tests/test_utils.py | Python | mit | 1,276 |
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def spaces_in_column_names():
train_data = h2o.upload_file(path=pyunit_utils.locate("smalldata/jira/spaces_in_column_names.csv"))
train_data.show()
train_data.describe()
train_data["r e s p o n s e"] = train_data["r e s p o n s e"].asfactor()
X = ["p r e d i c t o r 1","predictor2","p r e d i ctor3","pre d ictor4","predictor5"]
gbm = H2OGradientBoostingEstimator(ntrees=1, distribution="bernoulli", min_rows=1)
gbm.train(x=X,y="r e s p o n s e", training_frame=train_data)
gbm.show()
if __name__ == "__main__":
pyunit_utils.standalone_test(spaces_in_column_names)
else:
spaces_in_column_names()
| YzPaul3/h2o-3 | h2o-py/tests/testdir_misc/pyunit_spaces_in_column_names.py | Python | apache-2.0 | 780 |
# Based on Rapptz's RoboDanny's repl cog
import contextlib
import inspect
import logging
import re
import sys
import textwrap
import traceback
from io import StringIO
from typing import *
from typing import Pattern
import discord
from discord.ext import commands
# i took this from somewhere and i cant remember where
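# Matches a fenced markdown code block (``` or ````) and captures its body in
# the named group "code", so commands accept input with or without code fences.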
md: Pattern = re.compile(r"^(([ \t]*`{3,4})([^\n]*)(?P<code>[\s\S]+?)(^[ \t]*\2))", re.MULTILINE)
logger = logging.getLogger(__name__)
class BotDebug(object):
def __init__(self, client: commands.Bot):
self.client = client
self.last_eval = None
@commands.command(hidden=True)
async def exec(self, ctx: commands.Context, *, cmd: str):
result, stdout, stderr = await self.run(ctx, cmd, use_exec=True)
await self.send_output(ctx, result, stdout, stderr)
@commands.command(hidden=True)
async def eval(self, ctx: commands.Context, *, cmd: str):
scope = {"_": self.last_eval, "last": self.last_eval}
result, stdout, stderr = await self.run(ctx, cmd, use_exec=False, extra_scope=scope)
self.last_eval = result
await self.send_output(ctx, result, stdout, stderr)
async def send_output(self, ctx: commands.Context, result: str, stdout: str, stderr: str):
print(result, stdout, stderr)
if result is not None:
await ctx.send(f"Result: `{result}`")
if stdout:
logger.info(f"exec stdout: \n{stdout}")
await ctx.send("stdout:")
await self.send_split(ctx, stdout)
if stderr:
logger.error(f"exec stderr: \n{stderr}")
await ctx.send("stderr:")
await self.send_split(ctx, stderr)
async def run(self, ctx: commands.Context, cmd: str, use_exec: bool, extra_scope: dict=None) -> Tuple[Any, str, str]:
        if not await self.client.is_owner(ctx.author):
return None, "", ""
# note: exec/eval inserts __builtins__ if a custom version is not defined (or set to {} or whatever)
scope: Dict[str, Any] = {'bot': self.client, 'ctx': ctx, 'discord': discord}
if extra_scope:
scope.update(extra_scope)
match: Match = md.match(cmd)
code: str = match.group("code").strip() if match else cmd.strip('` \n')
logger.info(f"Executing code '{code}'")
result = None
with std_redirect() as (stdout, stderr):
try:
if use_exec:
# wrap in async function to run in loop and allow await calls
func = f"async def run():\n{textwrap.indent(code, ' ')}"
exec(func, scope)
result = await scope['run']()
else:
result = eval(code, scope)
# eval doesn't allow `await`
if inspect.isawaitable(result):
result = await result
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
await self.on_error(ctx)
else:
await ctx.message.add_reaction('✅')
return result, stdout.getvalue(), stderr.getvalue()
async def on_error(self, ctx: commands.Context):
# prepend a "- " to each line and use ```diff``` syntax highlighting to color the error message red.
# also strip lines 2 and 3 of the traceback which includes full path to the file, irrelevant for repl code.
# yes i know error[:1] is basically error[0] but i want it to stay as a list
logger.exception("Error in exec code")
error = traceback.format_exc().splitlines()
error = textwrap.indent('\n'.join(error[:1] + error[3:]), '- ', lambda x: True)
await ctx.send("Traceback:")
await self.send_split(ctx, error, prefix="```diff\n")
async def send_split(self, ctx: commands.Context, text: str, *, prefix="```\n", postfix="\n```"):
max_len = 2000 - (len(prefix) + len(postfix))
text: List[str] = [text[x:x + max_len] for x in range(0, len(text), max_len)]
print(text)
for message in text:
await ctx.send(f"{prefix}{message}{postfix}")
@contextlib.contextmanager
def std_redirect():
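    """Temporarily redirect sys.stdout/stderr to StringIO buffers so output of
    exec'd/eval'd code can be captured; the real streams are restored on exit."""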
    stdout = sys.stdout
    stderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout = stdout
        sys.stderr = stderr
def init(bot: commands.Bot, cfg: dict):
bot.add_cog(BotDebug(bot))
| 10se1ucgo/LoLTrivia | plugins/debug.py | Python | mit | 4,487 |
"""
WSGI config for zulip project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
# Because import_module does not correctly handle safe circular imports we
# need to import zerver.models first before the middleware tries to import it.
import zerver.models
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| JanzTam/zulip | zproject/wsgi.py | Python | apache-2.0 | 1,178 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-12 11:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20170112_1045'),
]
operations = [
migrations.AlterField(
model_name='photodb',
name='aspect',
field=models.FloatField(blank=True, null=True),
),
]
| BOOLRon/lovelypic | core/migrations/0007_auto_20170112_1119.py | Python | mit | 456 |
#!/usr/bin/python
# Initial author: Solaiappan Manimaran
# PathoMap performs the alignment through wrappers for each type of aligners.
# Pathoscope - Predicts strains of genomes in Nextgen seq alignment file (sam/bl8)
# Copyright (C) 2013 Johnson Lab - Boston University and Crandall Lab George Washington University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys, math, shutil
pathoscopedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,pathoscopedir)
from pathoscope.pathomap.bowtie2wrapper import bowtie2Wrap
from pathoscope.utils import seqParse
# ===========================================================
class PathoMapOptions:
MAX_REF_FILE_SIZE = 4.3e9
verbose = False
outDir = "."
indexDir = "."
numThreads = 8
outAlignFile = "outalign.sam"
inReadFile = ""
inReadFilePair1 = ""
inReadFilePair2 = ""
targetRefFiles = []
filterRefFiles = []
targetIndexPrefixes = []
filterIndexPrefixes = []
targetAlignFiles = []
filterAlignFiles = []
targetAlignParameters = None
filterAlignParameters = None
btHome = None
exp_tag = ""
# Main entry function to PathoMap that does all the processing
def processPathoMap(pathoMapOptions):
procPathoMapOptions = copyPathoMapOptions(pathoMapOptions)
# Splitting reference files if bigger than MAX_REF_FILE_SIZE
ptargetRefFiles = []
for filePath in pathoMapOptions.targetRefFiles:
if pathoMapOptions.verbose:
print "Checking whether the file: " + filePath + " needs to be split"
files = splitCheck(filePath, pathoMapOptions.MAX_REF_FILE_SIZE);
for f in files:
ptargetRefFiles.append(f)
procPathoMapOptions.targetRefFiles = ptargetRefFiles
pfilterRefFiles = []
for filePath in pathoMapOptions.filterRefFiles:
if pathoMapOptions.verbose:
print "Checking whether the file: " + filePath + " needs to be split"
files = splitCheck(filePath, pathoMapOptions.MAX_REF_FILE_SIZE);
for f in files:
pfilterRefFiles.append(f)
procPathoMapOptions.filterRefFiles = pfilterRefFiles
# Creating Index if it does not exist
bowtie2Options = bowtie2Wrap.Bowtie2Options()
bowtie2Options.verbose = procPathoMapOptions.verbose
bowtie2Options.btHome = procPathoMapOptions.btHome
bowtie2Options.indexDir = procPathoMapOptions.indexDir
for filePath in ptargetRefFiles:
bowtie2Options.refFile = filePath
(_, tail) = os.path.split(filePath)
(base, _) = os.path.splitext(tail)
bowtie2Options.btIndexPrefix = base
if pathoMapOptions.verbose:
print "Creating bowtie2 index for: " + filePath
bowtie2Wrap.create_bowtie2_index(bowtie2Options)
procPathoMapOptions.targetIndexPrefixes.append(base)
for filePath in pfilterRefFiles:
bowtie2Options.refFile = filePath
(_, tail) = os.path.split(filePath)
(base, _) = os.path.splitext(tail)
bowtie2Options.btIndexPrefix = base
if pathoMapOptions.verbose:
print "Creating bowtie2 index for: " + filePath
bowtie2Wrap.create_bowtie2_index(bowtie2Options)
procPathoMapOptions.filterIndexPrefixes.append(base)
# Creating the Alignment file
bowtie2Options = bowtie2Wrap.Bowtie2Options()
bowtie2Options.verbose = procPathoMapOptions.verbose
bowtie2Options.btHome = procPathoMapOptions.btHome
bowtie2Options.numThreads = procPathoMapOptions.numThreads
bowtie2Options.outDir = procPathoMapOptions.outDir
bowtie2Options.indexDir = procPathoMapOptions.indexDir
bowtie2Options.readFile = procPathoMapOptions.inReadFile
bowtie2Options.readFilePair1 = procPathoMapOptions.inReadFilePair1
bowtie2Options.readFilePair2 = procPathoMapOptions.inReadFilePair2
if (len(procPathoMapOptions.inReadFilePair1)>0 and
len(procPathoMapOptions.inReadFilePair2)>0 and
len(procPathoMapOptions.inReadFile)>0):
bowtie2Options.bothReadFlag = True # newly added
elif (len(procPathoMapOptions.inReadFilePair1)>0 and
len(procPathoMapOptions.inReadFilePair2)>0):
bowtie2Options.pairedReadFlag = True
elif (len(procPathoMapOptions.inReadFile)>0):
bowtie2Options.singleReadFlag = True # newly added
if procPathoMapOptions.targetAlignParameters is not None:
bowtie2Options.additionalOptions = procPathoMapOptions.targetAlignParameters
for indexPrefix in procPathoMapOptions.targetIndexPrefixes:
bowtie2Options.btIndexPrefix = procPathoMapOptions.indexDir + os.sep + indexPrefix
bowtie2Options.outAlignFile = procPathoMapOptions.exp_tag + indexPrefix + ".sam"
if pathoMapOptions.verbose:
print "Creating bowtie2 alignment: " + bowtie2Options.outAlignFile
bowtie2Wrap.run_bowtie2(bowtie2Options)
procPathoMapOptions.targetAlignFiles.append(procPathoMapOptions.outDir + os.sep +
bowtie2Options.outAlignFile)
# Appending the Alignment files and Filtering
if len(procPathoMapOptions.targetAlignFiles) > 1:
appendAlignFile = procPathoMapOptions.outDir + os.sep + procPathoMapOptions.exp_tag + "appendAlign.sam"
if pathoMapOptions.verbose:
print "Appending alignment files to: " + appendAlignFile
append_sam_file(appendAlignFile, procPathoMapOptions.targetAlignFiles)
else:
appendAlignFile = procPathoMapOptions.targetAlignFiles[0]
if len(procPathoMapOptions.filterIndexPrefixes) > 0:
bowtie2Options.readFile = procPathoMapOptions.outDir + os.sep + procPathoMapOptions.exp_tag + "appendAlign.fq"
bowtie2Options.readFilePair1 = ""
bowtie2Options.readFilePair2 = ""
bowtie2Options.bothReadFlag = False
bowtie2Options.pairedReadFlag = False
bowtie2Options.singleReadFlag = True
if procPathoMapOptions.filterAlignParameters is not None:
bowtie2Options.additionalOptions = procPathoMapOptions.filterAlignParameters
bowtie2Wrap.extractRead(appendAlignFile, bowtie2Options.readFile)
for indexPrefix in procPathoMapOptions.filterIndexPrefixes:
bowtie2Options.btIndexPrefix = procPathoMapOptions.indexDir + os.sep + indexPrefix
bowtie2Options.outAlignFile = procPathoMapOptions.exp_tag + indexPrefix + ".sam"
if pathoMapOptions.verbose:
print "Creating bowtie2 alignment: " + bowtie2Options.outAlignFile
bowtie2Wrap.run_bowtie2(bowtie2Options)
procPathoMapOptions.filterAlignFiles.append(procPathoMapOptions.outDir + os.sep +
bowtie2Options.outAlignFile)
# Filtering the Alignment file
outAlignFile = procPathoMapOptions.outDir + os.sep + procPathoMapOptions.outAlignFile
if pathoMapOptions.verbose:
print "Filtering and creating the alignment: " + outAlignFile
if len(procPathoMapOptions.filterAlignFiles) > 0:
filter_alignment(appendAlignFile, procPathoMapOptions.filterAlignFiles, outAlignFile)
elif ((len(procPathoMapOptions.targetAlignFiles) > 1) or \
(len(procPathoMapOptions.targetIndexPrefixes) > 0)):
os.rename(appendAlignFile, outAlignFile)
else: # Input appendAlignFile provided by user, hence make a copy for outAlignFile
shutil.copy(appendAlignFile, outAlignFile)
# Make a copy of core pathoMapOptions
def copyPathoMapOptions(pathoMapOptions):
procPathoMapOptions = PathoMapOptions()
procPathoMapOptions.verbose = pathoMapOptions.verbose
procPathoMapOptions.outDir = pathoMapOptions.outDir
procPathoMapOptions.indexDir = pathoMapOptions.indexDir
procPathoMapOptions.numThreads = pathoMapOptions.numThreads
procPathoMapOptions.outAlignFile = pathoMapOptions.outAlignFile
procPathoMapOptions.inReadFile = pathoMapOptions.inReadFile
procPathoMapOptions.inReadFilePair1 = pathoMapOptions.inReadFilePair1
procPathoMapOptions.inReadFilePair2 = pathoMapOptions.inReadFilePair2
procPathoMapOptions.targetRefFiles = pathoMapOptions.targetRefFiles
procPathoMapOptions.filterRefFiles = pathoMapOptions.filterRefFiles
procPathoMapOptions.targetIndexPrefixes = pathoMapOptions.targetIndexPrefixes
procPathoMapOptions.filterIndexPrefixes = pathoMapOptions.filterIndexPrefixes
procPathoMapOptions.targetAlignFiles = pathoMapOptions.targetAlignFiles
procPathoMapOptions.filterAlignFiles = pathoMapOptions.filterAlignFiles
procPathoMapOptions.targetAlignParameters = pathoMapOptions.targetAlignParameters
procPathoMapOptions.filterAlignParameters = pathoMapOptions.filterAlignParameters
procPathoMapOptions.btHome = pathoMapOptions.btHome
procPathoMapOptions.exp_tag = pathoMapOptions.exp_tag
return procPathoMapOptions
# If the given file size is greater than maxSize, then it splits
# and returns a list of split file paths where each file is less than maxSize
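# Illustrative example: a 9 GB refs.fa with the 4.3 GB limit is written out,
# record by record in round-robin fashion, as refs_0.fa, refs_1.fa and refs_2.fa.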
def splitCheck(filePath, maxSize):
files = []
fileSize = os.stat(filePath).st_size
nSplit = 1
if (fileSize > maxSize):
nSplit = int(math.ceil(1.0*fileSize/float(maxSize)))
if nSplit==1:
files.append(filePath)
return files
(base, ext) = os.path.splitext(filePath)
#check if we have already done this splitting
for i in range(nSplit):
fiPath=base+'_'+str(i)+ext
splitReq=False
if not os.path.exists(fiPath):
splitReq=True
break
fps = []
for i in range(nSplit):
fiPath=base+'_'+str(i)+ext
files.append(fiPath)
if splitReq:
fps.append(open(fiPath,'w'))
if splitReq:
with open(filePath,'r') as fp:
j=0
if ext=='.fq':
for r in seqParse.parse(fp,'fastq'):
					fps[j%nSplit].write('@%s %s\n%s\n+\n%s\n' % (r.id, r.description, r.seq, r.qual))
j+=1
else:
for r in seqParse.parse(fp,'fasta'):
fps[j%nSplit].write('>%s %s\n%s\n' % (r.id, r.description, r.seq))
j+=1
	if splitReq:
		for i in range(nSplit):
			fps[i].close()
return files
def filter_alignment(targetAlignFile, filterAlignFiles, outAlignFile):
return bowtie2Wrap.filter_alignment(targetAlignFile, filterAlignFiles, outAlignFile)
# Appends all the appendFiles to outfile
def append_file(outfile, appendFiles):
with open(outfile,'w') as out1:
for file1 in appendFiles:
if (file1 is not None):
with open(file1,'r') as in2:
for ln in in2:
out1.write(ln)
# Appends all the sam appendFiles to outfile
def append_sam_file(outfile, appendFiles):
with open(outfile,'w') as out1:
# First, writing the header by merging headers from all files
for file1 in appendFiles:
if (file1 is not None):
with open(file1,'r') as in2:
for ln in in2:
if ln[0] == '@':
out1.write(ln)
# Writing the body by merging body from all files
for file1 in appendFiles:
if (file1 is not None):
with open(file1,'r') as in2:
for ln in in2:
if ln[0] != '@':
out1.write(ln)
| PathoScope/PathoScope | pathoscope/pathomap/PathoMapA.py | Python | gpl-3.0 | 10,774 |
JE = 'je'
TU = 'tu'
ELLE = 'el'
IL = 'il'
IT = 'it'
NOUS = 'nu'
VOUS = 'vu'
ILS = 'is'
ELLES = 'es'
afa = {
# https://en.wikipedia.org/wiki/Afroasiatic_languages
'order': {'default': (1, 0), 'adj_nom': (0, 1)},
'order_triple': {'default': (0, 1, 2), },
}
bantu = {
'order': {'default': (0, 1), 'det_nom': (1, 0), 'adj_nom': (1, 0), 'n_p': (1, 0), 'pos_nom': (1, 0)},
'order_triple': {'default': (0, 1, 2)},
}
geez = {
'order': {'default': (1, 0), 'adj_nom': (0, 1)},
'order_triple': {'default': (0, 2, 1), },
}
kwa = {
# 'order': {'default': (1, 0), 'act_adv': (0, 1), 'n_p': (0, 1), 'pos_nom': (0, 1), 'nom_in': (0, 1), 'pre_nom': (0, 1), 'tu_adv': (0, 1), 'vous_adv': (0, 1)},
'order': {'default': (0, 1), 'adj_nom': (1, 0), 'det_nom': (1, 0), 'nom_nom': (1, 0), 'of_p': (1, 0), 'pre_det_nom': (1, 0), 'pre_pos_nom': (1, 0), 'question': {'act_adv': (0, 1)},},
'order_triple': {'default': (0, 1, 2), 'question': (2, 1, 0), },
}
latin = {
'order': {'default': (0, 1), 'question': {'act_adv': (1, 0)}, },
'order_triple': {'default': (0, 1, 2), 'question': (2, 1, 0)},
}
| kasahorow/kwl | data/sua.py | Python | bsd-2-clause | 1,094 |
# make importing these a bit less hassle
from flexget.utils.titles.series import SeriesParser, ID_TYPES # pylint: disable=unused-import
from flexget.utils.titles.movie import MovieParser # pylint: disable=unused-import
from flexget.utils.titles.parser import TitleParser # pylint: disable=unused-import
| qvazzler/Flexget | flexget/utils/titles/__init__.py | Python | mit | 306 |
"""Raw representations of every data type in the AWS ElastiCache service.
See Also:
`AWS developer guide for ElastiCache
<https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/index.html>`_
This file is automatically generated, and should not be directly edited.
"""
from typing import Any
from typing import Dict
from attr import attrib
from attr import attrs
from ..core import ATTRSCONFIG
from ..core import Resource
from ..core import ResourceProperties
from ..core import create_object_converter
__all__ = [
"CacheCluster",
"CacheClusterProperties",
"ParameterGroup",
"ParameterGroupProperties",
"ReplicationGroup",
"ReplicationGroupProperties",
"SecurityGroup",
"SecurityGroupProperties",
"SecurityGroupIngress",
"SecurityGroupIngressProperties",
"SubnetGroup",
"SubnetGroupProperties",
]
@attrs(**ATTRSCONFIG)
class CacheClusterProperties(ResourceProperties):
AutoMinorVersionUpgrade = attrib(default=None)
AZMode = attrib(default=None)
CacheNodeType = attrib(default=None)
CacheParameterGroupName = attrib(default=None)
CacheSecurityGroupNames = attrib(default=None)
CacheSubnetGroupName = attrib(default=None)
ClusterName = attrib(default=None)
Engine = attrib(default=None)
EngineVersion = attrib(default=None)
NotificationTopicArn = attrib(default=None)
NumCacheNodes = attrib(default=None)
Port = attrib(default=None)
PreferredAvailabilityZone = attrib(default=None)
PreferredAvailabilityZones = attrib(default=None)
PreferredMaintenanceWindow = attrib(default=None)
SnapshotArns = attrib(default=None)
SnapshotName = attrib(default=None)
SnapshotRetentionLimit = attrib(default=None)
SnapshotWindow = attrib(default=None)
Tags = attrib(default=None)
VpcSecurityGroupIds = attrib(default=None)
@attrs(**ATTRSCONFIG)
class CacheCluster(Resource):
"""A Cache Cluster for ElastiCache.
See Also:
`AWS Cloud Formation documentation for CacheCluster
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::CacheCluster"
Properties: CacheClusterProperties = attrib(
factory=CacheClusterProperties,
converter=create_object_converter(CacheClusterProperties),
)
@attrs(**ATTRSCONFIG)
class ParameterGroupProperties(ResourceProperties):
CacheParameterGroupFamily = attrib(default=None)
Description = attrib(default=None)
Properties = attrib(default=None)
@attrs(**ATTRSCONFIG)
class ParameterGroup(Resource):
"""A Parameter Group for ElastiCache.
See Also:
`AWS Cloud Formation documentation for ParameterGroup
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-parameter-group.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::ParameterGroup"
Properties: ParameterGroupProperties = attrib(
factory=ParameterGroupProperties,
converter=create_object_converter(ParameterGroupProperties),
)
@attrs(**ATTRSCONFIG)
class ReplicationGroupProperties(ResourceProperties):
AtRestEncryptionEnabled = attrib(default=None)
AuthToken = attrib(default=None)
AutomaticFailoverEnabled = attrib(default=None)
AutoMinorVersionUpgrade = attrib(default=None)
CacheNodeType = attrib(default=None)
CacheParameterGroupName = attrib(default=None)
CacheSecurityGroupNames = attrib(default=None)
CacheSubnetGroupName = attrib(default=None)
Engine = attrib(default=None)
EngineVersion = attrib(default=None)
KmsKeyId = attrib(default=None)
NodeGroupConfiguration = attrib(default=None)
NotificationTopicArn = attrib(default=None)
NumCacheClusters = attrib(default=None)
NumNodeGroups = attrib(default=None)
Port = attrib(default=None)
PreferredCacheClusterAZs = attrib(default=None)
PreferredMaintenanceWindow = attrib(default=None)
PrimaryClusterId = attrib(default=None)
ReplicasPerNodeGroup = attrib(default=None)
ReplicationGroupDescription = attrib(default=None)
ReplicationGroupId = attrib(default=None)
SecurityGroupIds = attrib(default=None)
SnapshotArns = attrib(default=None)
SnapshotName = attrib(default=None)
SnapshotRetentionLimit = attrib(default=None)
SnapshottingClusterId = attrib(default=None)
SnapshotWindow = attrib(default=None)
Tags = attrib(default=None)
TransitEncryptionEnabled = attrib(default=None)
@attrs(**ATTRSCONFIG)
class ReplicationGroup(Resource):
"""A Replication Group for ElastiCache.
See Also:
`AWS Cloud Formation documentation for ReplicationGroup
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticache-replicationgroup.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::ReplicationGroup"
Properties: ReplicationGroupProperties = attrib(
factory=ReplicationGroupProperties,
converter=create_object_converter(ReplicationGroupProperties),
)
# NB: UpdatePolicy may be set for ReplicationGroup
# (unlike most Resource types)
UpdatePolicy: Dict[str, Any] = attrib(factory=dict)
@attrs(**ATTRSCONFIG)
class SecurityGroupProperties(ResourceProperties):
Description = attrib(default=None)
@attrs(**ATTRSCONFIG)
class SecurityGroup(Resource):
"""A Security Group for ElastiCache.
See Also:
`AWS Cloud Formation documentation for SecurityGroup
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-security-group.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::SecurityGroup"
Properties: SecurityGroupProperties = attrib(
factory=SecurityGroupProperties,
converter=create_object_converter(SecurityGroupProperties),
)
@attrs(**ATTRSCONFIG)
class SecurityGroupIngressProperties(ResourceProperties):
CacheSecurityGroupName = attrib(default=None)
EC2SecurityGroupName = attrib(default=None)
EC2SecurityGroupOwnerId = attrib(default=None)
@attrs(**ATTRSCONFIG)
class SecurityGroupIngress(Resource):
"""A Security Group Ingress for ElastiCache.
See Also:
`AWS Cloud Formation documentation for SecurityGroupIngress
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-security-group-ingress.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::SecurityGroupIngress"
Properties: SecurityGroupIngressProperties = attrib(
factory=SecurityGroupIngressProperties,
converter=create_object_converter(SecurityGroupIngressProperties),
)
@attrs(**ATTRSCONFIG)
class SubnetGroupProperties(ResourceProperties):
CacheSubnetGroupName = attrib(default=None)
Description = attrib(default=None)
SubnetIds = attrib(default=None)
@attrs(**ATTRSCONFIG)
class SubnetGroup(Resource):
"""A Subnet Group for ElastiCache.
See Also:
`AWS Cloud Formation documentation for SubnetGroup
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html>`_
"""
RESOURCE_TYPE = "AWS::ElastiCache::SubnetGroup"
Properties: SubnetGroupProperties = attrib(
factory=SubnetGroupProperties,
converter=create_object_converter(SubnetGroupProperties),
)
| garyd203/flying-circus | src/flyingcircus/_raw/elasticache.py | Python | lgpl-3.0 | 7,383 |
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
class TestIter(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.X = np.random.randn(1000, 2)
self.Y = (self.X[:, 0] > 0).astype('int')
def test_fit_iter_basic(self):
model = hyperopt_estimator(verbose=1, trial_timeout=5.0)
for ii, trials in enumerate(model.fit_iter(self.X, self.Y)):
assert trials is model.trials
assert len(trials.trials) == ii
if ii == 10:
break
def test_fit(self):
model = hyperopt_estimator(verbose=1, max_evals=5, trial_timeout=5.0)
model.fit(self.X, self.Y)
assert len(model.trials.trials) == 5
def test_fit_biginc(self):
model = hyperopt_estimator(verbose=1, max_evals=5, trial_timeout=5.0,
fit_increment=20)
model.fit(self.X, self.Y)
# -- make sure we only get 5 even with big fit_increment
assert len(model.trials.trials) == 5
class TestSpace(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.X = np.random.randn(1000, 2)
self.Y = (self.X[:, 0] > 0).astype('int')
def test_smoke(self):
# -- verify the space argument is accepted and runs
space = components.generic_space()
model = hyperopt_estimator(
verbose=1, max_evals=10, trial_timeout=5, space=space)
model.fit(self.X, self.Y)
# -- flake8 eof
| wavelets/hyperopt-sklearn | hpsklearn/tests/test_estimator.py | Python | bsd-3-clause | 1,604 |
from django.contrib.comments.moderation import CommentModerator, moderator
from scipy_central.submission.models import Revision
class RevisionModerator(CommentModerator):
"""
Comments moderation settings for
`scipy_central.submission.models.Revision` model
"""
enable_field = 'enable_comments'
moderator.register(Revision, RevisionModerator)
| Srisai85/SciPyCentral | scipy_central/comments/moderation.py | Python | bsd-3-clause | 364 |
# archivebox init
# archivebox add
import os
import subprocess
from pathlib import Path
import json, shutil
import sqlite3
from archivebox.config import OUTPUT_PERMISSIONS
from .fixtures import *
def test_init(tmp_path, process):
assert "Initializing a new ArchiveBox" in process.stdout.decode("utf-8")
def test_update(tmp_path, process):
os.chdir(tmp_path)
update_process = subprocess.run(['archivebox', 'init'], capture_output=True)
assert "updating existing ArchiveBox" in update_process.stdout.decode("utf-8")
def test_add_link(tmp_path, process, disable_extractors_dict):
disable_extractors_dict.update({"USE_WGET": "true"})
os.chdir(tmp_path)
add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
capture_output=True, env=disable_extractors_dict)
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
assert "index.json" in [x.name for x in archived_item_path.iterdir()]
with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
output_json = json.load(f)
assert "Example Domain" == output_json['history']['title'][0]['output']
with open(archived_item_path / "index.html", "r", encoding="utf-8") as f:
output_html = f.read()
assert "Example Domain" in output_html
def test_add_link_support_stdin(tmp_path, process, disable_extractors_dict):
disable_extractors_dict.update({"USE_WGET": "true"})
os.chdir(tmp_path)
stdin_process = subprocess.Popen(["archivebox", "add"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=disable_extractors_dict)
stdin_process.communicate(input="http://127.0.0.1:8080/static/example.com.html".encode())
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
assert "index.json" in [x.name for x in archived_item_path.iterdir()]
with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
output_json = json.load(f)
assert "Example Domain" == output_json['history']['title'][0]['output']
def test_correct_permissions_output_folder(tmp_path, process):
index_files = ['index.sqlite3', 'archive']
for file in index_files:
file_path = tmp_path / file
assert oct(file_path.stat().st_mode)[-3:] == OUTPUT_PERMISSIONS
def test_correct_permissions_add_command_results(tmp_path, process, disable_extractors_dict):
os.chdir(tmp_path)
add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
env=disable_extractors_dict)
archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
for path in archived_item_path.iterdir():
assert oct(path.stat().st_mode)[-3:] == OUTPUT_PERMISSIONS
def test_collision_urls_different_timestamps(tmp_path, process, disable_extractors_dict):
os.chdir(tmp_path)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
env=disable_extractors_dict)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True,
env=disable_extractors_dict)
archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))
json_index = str(first_archive / "index.json")
with open(json_index, "r", encoding="utf-8") as f:
link_details = json.loads(f.read())
link_details["url"] = "http://127.0.0.1:8080/static/iana.org.html"
with open(json_index, "w", encoding="utf-8") as f:
json.dump(link_details, f)
init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
# 1 from duplicated url, 1 from corrupted index
assert "Skipped adding 2 invalid link data directories" in init_process.stdout.decode("utf-8")
assert init_process.returncode == 0
def test_collision_timestamps_different_urls(tmp_path, process, disable_extractors_dict):
os.chdir(tmp_path)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
env=disable_extractors_dict)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True,
env=disable_extractors_dict)
archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))
archive_folders.remove(first_archive.name)
json_index = str(first_archive / "index.json")
with open(json_index, "r", encoding="utf-8") as f:
link_details = json.loads(f.read())
link_details["timestamp"] = archive_folders[0]
with open(json_index, "w", encoding="utf-8") as f:
json.dump(link_details, f)
init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
assert "Skipped adding 1 invalid link data directories" in init_process.stdout.decode("utf-8")
assert init_process.returncode == 0
def test_orphaned_folders(tmp_path, process, disable_extractors_dict):
os.chdir(tmp_path)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--json", "--with-headers"], capture_output=True)
with open(tmp_path / "index.json", "wb") as f:
f.write(list_process.stdout)
conn = sqlite3.connect("index.sqlite3")
c = conn.cursor()
c.execute("DELETE from core_snapshot")
conn.commit()
conn.close()
init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
assert "Added 1 orphaned links from existing JSON index" in init_process.stdout.decode("utf-8")
assert init_process.returncode == 0
def test_unrecognized_folders(tmp_path, process, disable_extractors_dict):
os.chdir(tmp_path)
subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
env=disable_extractors_dict)
(tmp_path / "archive" / "some_random_folder").mkdir()
init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
assert "Skipped adding 1 invalid link data directories" in init_process.stdout.decode("utf-8")
assert init_process.returncode == 0
def test_tags_migration(tmp_path, disable_extractors_dict):
base_sqlite_path = Path(__file__).parent / 'tags_migration'
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
shutil.copytree(str(base_sqlite_path), tmp_path)
os.chdir(tmp_path)
conn = sqlite3.connect("index.sqlite3")
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT id, tags from core_snapshot")
snapshots = c.fetchall()
snapshots_dict = { sn['id']: sn['tags'] for sn in snapshots}
conn.commit()
conn.close()
init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
conn = sqlite3.connect("index.sqlite3")
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("""
SELECT core_snapshot.id, core_tag.name from core_snapshot
JOIN core_snapshot_tags on core_snapshot_tags.snapshot_id=core_snapshot.id
JOIN core_tag on core_tag.id=core_snapshot_tags.tag_id
""")
tags = c.fetchall()
conn.commit()
conn.close()
for tag in tags:
snapshot_id = tag["id"]
tag_name = tag["name"]
# Check each tag migrated is in the previous field
assert tag_name in snapshots_dict[snapshot_id]
| pirate/bookmark-archiver | tests/test_init.py | Python | mit | 7,971 |
print("hola mundo, estoy feliz con Github")
| cmontoyau1993/demo-project-py | hola.py | Python | mit | 46 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
from copy import deepcopy
import axiom_rules
import fact_groups
import instantiate
import normalize
import optparse
import pddl
import sas_tasks
import simplify
import sys
import timers
import tools
# TODO: The translator may generate trivial derived variables which are always true,
# for example if there is a derived predicate in the input that only depends on
# (non-derived) variables which are detected as always true.
# Such a situation was encountered in the PSR-STRIPS-DerivedPredicates domain.
# Such "always-true" variables should best be compiled away, but it is
# not clear what the best place to do this should be. Similar
# simplifications might be possible elsewhere, for example if a
# derived variable is synonymous with another variable (derived or
# non-derived).
ALLOW_CONFLICTING_EFFECTS = True
USE_PARTIAL_ENCODING = True
DETECT_UNREACHABLE = True
DUMP_TASK = False
## Setting the following variable to True can cause a severe
## performance penalty due to weaker relevance analysis (see issue7).
ADD_IMPLIED_PRECONDITIONS = False
DEBUG = False
removed_implied_effect_counter = 0
simplified_effect_condition_counter = 0
added_implied_precondition_counter = 0
def strips_to_sas_dictionary(groups, assert_partial):
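    # Map each atom to the list of finite-domain (variable, value) pairs that
    # encode it; with a partial encoding every atom is represented by exactly
    # one pair (checked below when assert_partial is set).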
dictionary = {}
for var_no, group in enumerate(groups):
for val_no, atom in enumerate(group):
dictionary.setdefault(atom, []).append((var_no, val_no))
if assert_partial:
assert all(len(sas_pairs) == 1
for sas_pairs in dictionary.values())
return [len(group) + 1 for group in groups], dictionary
def translate_strips_conditions_aux(conditions, dictionary, ranges):
condition = {}
for fact in conditions:
if fact.negated:
# we handle negative conditions later, because then we
# can recognize when the negative condition is already
# ensured by a positive condition
continue
for var, val in dictionary.get(fact, ()):
# The default () here is a bit of a hack. For goals (but
# only for goals!), we can get static facts here. They
# cannot be statically false (that would have been
# detected earlier), and hence they are statically true
# and don't need to be translated.
# TODO: This would not be necessary if we dealt with goals
# in the same way we deal with operator preconditions etc.,
# where static facts disappear during grounding. So change
# this when the goal code is refactored (also below). (**)
if (condition.get(var) is not None and
val not in condition.get(var)):
# Conflicting conditions on this variable: Operator invalid.
return None
condition[var] = set([val])
def number_of_values(var_vals_pair):
var, vals = var_vals_pair
return len(vals)
for fact in conditions:
if fact.negated:
## Note Here we use a different solution than in Sec. 10.6.4
## of the thesis. Compare the last sentences of the third
## paragraph of the section.
## We could do what is written there. As a test case,
## consider Airport ADL tasks with only one airport, where
## (occupied ?x) variables are encoded in a single variable,
## and conditions like (not (occupied ?x)) do occur in
## preconditions.
## However, here we avoid introducing new derived predicates
            ## by treating the negative precondition as a disjunctive precondition
## and expanding it by "multiplying out" the possibilities.
## This can lead to an exponential blow-up so it would be nice
## to choose the behaviour as an option.
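            ## Illustrative example: for a variable of range 3 that encodes the
            ## negated atom at value 1, the negative literal becomes the
            ## disjunction "var in {0, 2}", which multiply_out below expands
            ## into separate flat conditions.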
done = False
new_condition = {}
atom = pddl.Atom(fact.predicate, fact.args) # force positive
for var, val in dictionary.get(atom, ()):
# see comment (**) above
poss_vals = set(range(ranges[var]))
poss_vals.remove(val)
if condition.get(var) is None:
assert new_condition.get(var) is None
new_condition[var] = poss_vals
else:
# constrain existing condition on var
prev_possible_vals = condition.get(var)
done = True
prev_possible_vals.intersection_update(poss_vals)
if len(prev_possible_vals) == 0:
# Conflicting conditions on this variable:
# Operator invalid.
return None
if not done and len(new_condition) != 0:
# we did not enforce the negative condition by constraining
# an existing condition on one of the variables representing
# this atom. So we need to introduce a new condition:
# We can select any from new_condition and currently prefer the
                # smallest one.
candidates = sorted(new_condition.items(), key=number_of_values)
var, vals = candidates[0]
condition[var] = vals
def multiply_out(condition): # destroys the input
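        # Illustrative example (iteration order of the value sets may vary):
        # multiply_out({0: {1}, 1: {0, 2}}) -> [{0: 1, 1: 0}, {0: 1, 1: 2}]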
sorted_conds = sorted(condition.items(), key=number_of_values)
flat_conds = [{}]
for var, vals in sorted_conds:
if len(vals) == 1:
for cond in flat_conds:
cond[var] = vals.pop() # destroys the input here
else:
new_conds = []
for cond in flat_conds:
for val in vals:
new_cond = deepcopy(cond)
new_cond[var] = val
new_conds.append(new_cond)
flat_conds = new_conds
return flat_conds
return multiply_out(condition)
def translate_strips_conditions(conditions, dictionary, ranges,
mutex_dict, mutex_ranges):
if not conditions:
return [{}] # Quick exit for common case.
# Check if the condition violates any mutexes.
if translate_strips_conditions_aux(
conditions, mutex_dict, mutex_ranges) is None:
return None
return translate_strips_conditions_aux(conditions, dictionary, ranges)
def translate_strips_operator(operator, dictionary, ranges, mutex_dict, mutex_ranges, implied_facts):
conditions = translate_strips_conditions(operator.precondition, dictionary, ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
sas_operators = []
for condition in conditions:
op = translate_strips_operator_aux(operator, dictionary, ranges,
mutex_dict, mutex_ranges,
implied_facts, condition)
sas_operators.append(op)
return sas_operators
def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict,
mutex_ranges, implied_facts, condition):
# NOTE: This function does not really deal with the intricacies of properly
# encoding delete effects for grouped propositions in the presence of
# conditional effects. It should work ok but will bail out in more
# complicated cases even though a conflict does not necessarily exist.
possible_add_conflict = False
effect = {}
for conditions, fact in operator.add_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary,
ranges, mutex_dict,
mutex_ranges)
if eff_condition_list is None: # Impossible condition for this effect.
continue
eff_condition = [sorted(eff_cond.items())
for eff_cond in eff_condition_list]
for var, val in dictionary[fact]:
if condition.get(var) == val:
# Effect implied by precondition.
global removed_implied_effect_counter
removed_implied_effect_counter += 1
# print "Skipping effect of %s..." % operator.name
continue
effect_pair = effect.get(var)
if not effect_pair:
effect[var] = (val, eff_condition)
else:
other_val, eff_conditions = effect_pair
# Don't flag conflict just yet... the operator might be invalid
# because of conflicting add/delete effects (see pipesworld).
if other_val != val:
possible_add_conflict = True
eff_conditions.extend(eff_condition)
for conditions, fact in operator.del_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary, ranges, mutex_dict, mutex_ranges)
if eff_condition_list is None:
continue
eff_condition = [sorted(eff_cond.items())
for eff_cond in eff_condition_list]
for var, val in dictionary[fact]:
none_of_those = ranges[var] - 1
other_val, eff_conditions = effect.setdefault(var, (none_of_those, []))
if other_val != none_of_those:
# Look for matching add effect; ignore this del effect if found.
for cond in eff_condition:
if cond not in eff_conditions and [] not in eff_conditions:
print("Condition:")
print(cond)
print("Operator:")
operator.dump()
assert False, "Add effect with uncertain del effect partner?"
if other_val == val:
if ALLOW_CONFLICTING_EFFECTS:
# Conflicting ADD and DEL effect. This is *only* allowed if
# this is also a precondition, in which case there is *no*
# effect (the ADD takes precedence). We delete the add effect here.
if condition.get(var) != val:
# HACK HACK HACK!
# There used to be an assertion here that actually
# forbid this, but this was wrong in Pipesworld-notankage
# (e.g. task 01). The thing is, it *is* possible for
# an operator with proven (with the given invariants)
# inconsistent preconditions to actually end up here if
# the inconsistency of the preconditions is not obvious at
# the SAS+ encoding level.
#
# Example: Pipes-Notank-01, operator
# (pop-unitarypipe s12 b4 a1 a2 b4 lco lco).
# This has precondition last(b4, s12) and on(b4, a2) which
# is inconsistent because b4 can only be in one place.
# However, the chosen encoding encodes *what is last in s12*
# separately, and so the precondition translates to
# "last(s12) = b4 and on(b4) = a2", which does not look
# inconsistent at first glance.
#
# Something reasonable to do here would be to make a
# decent check that the precondition is indeed inconsistent
# (using *all* mutexes), but that seems tough with this
# convoluted code, so we just warn and reject the operator.
print("Warning: %s rejected. Cross your fingers." % (
operator.name))
if DEBUG:
operator.dump()
return None
assert False
assert eff_conditions == [[]]
del effect[var]
else:
assert not eff_condition[0] and not eff_conditions[0], "Uncertain conflict"
return None # Definite conflict otherwise.
else: # no add effect on this variable
if condition.get(var) != val:
if var in condition:
## HACK HACK HACK! There is a precondition on the variable for
## this delete effect on another value, so there is no need to
## represent the delete effect. Right? Right???
del effect[var]
continue
for index, cond in enumerate(eff_condition_list):
if cond.get(var) != val:
# Need a guard for this delete effect.
assert (var not in condition and
var not in eff_condition[index]), "Oops?"
eff_condition[index].append((var, val))
eff_conditions.extend(eff_condition)
if possible_add_conflict:
operator.dump()
assert not possible_add_conflict, "Conflicting add effects?"
# assert eff_condition != other_condition, "Duplicate effect"
# assert eff_condition and other_condition, "Dominated conditional effect"
if ADD_IMPLIED_PRECONDITIONS:
implied_precondition = set()
for fact in condition.items():
implied_precondition.update(implied_facts[fact])
pre_post = []
for var, (post, eff_condition_lists) in effect.items():
pre = condition.pop(var, -1)
if ranges[var] == 2:
# Apply simplifications for binary variables.
if prune_stupid_effect_conditions(var, post, eff_condition_lists):
global simplified_effect_condition_counter
simplified_effect_condition_counter += 1
if (ADD_IMPLIED_PRECONDITIONS and
pre == -1 and (var, 1 - post) in implied_precondition):
global added_implied_precondition_counter
added_implied_precondition_counter += 1
pre = 1 - post
# print "Added precondition (%d = %d) to %s" % (
# var, pre, operator.name)
for eff_condition in eff_condition_lists:
pre_post.append((var, pre, post, eff_condition))
prevail = list(condition.items())
return sas_tasks.SASOperator(operator.name, prevail, pre_post, operator.cost)
def prune_stupid_effect_conditions(var, val, conditions):
## (IF <conditions> THEN <var> := <val>) is a conditional effect.
## <var> is guaranteed to be a binary variable.
## <conditions> is in DNF representation (list of lists).
##
## We simplify <conditions> by applying two rules:
## 1. Conditions of the form "var = dualval" where var is the
## effect variable and dualval != val can be omitted.
## (If var != dualval, then var == val because it is binary,
    ## which means that in such situations the effect is a no-op.)
## 2. If conditions contains any empty list, it is equivalent
## to True and we can remove all other disjuncts.
##
## returns True when anything was changed
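    ## Illustrative example: for var=3, val=1 the conditions
    ## [[(3, 0), (2, 1)], [(4, 0)]] are simplified by rule 1 to
    ## [[(2, 1)], [(4, 0)]], and the function returns True.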
if conditions == [[]]:
return False ## Quick exit for common case.
assert val in [0, 1]
dual_fact = (var, 1 - val)
simplified = False
for condition in conditions:
# Apply rule 1.
while dual_fact in condition:
# print "*** Removing dual condition"
simplified = True
condition.remove(dual_fact)
# Apply rule 2.
if not condition:
conditions[:] = [[]]
simplified = True
break
return simplified
def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges):
conditions = translate_strips_conditions(axiom.condition, dictionary, ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
if axiom.effect.negated:
[(var, _)] = dictionary[axiom.effect.positive()]
effect = (var, ranges[var] - 1)
else:
[effect] = dictionary[axiom.effect]
axioms = []
for condition in conditions:
axioms.append(sas_tasks.SASAxiom(condition.items(), effect))
return axioms
def translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict, mutex_ranges, implied_facts):
result = []
for action in actions:
sas_ops = translate_strips_operator(action, strips_to_sas, ranges, mutex_dict, mutex_ranges, implied_facts)
result.extend(sas_ops)
return result
def translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict, mutex_ranges):
result = []
for axiom in axioms:
sas_axioms = translate_strips_axiom(axiom, strips_to_sas, ranges, mutex_dict, mutex_ranges)
result.extend(sas_axioms)
return result
def dump_task(init, goals, actions, axioms, axiom_layer_dict):
old_stdout = sys.stdout
with open("output.dump", "w") as dump_file:
sys.stdout = dump_file
print("Initial state")
for atom in init:
print(atom)
print()
print("Goals")
for goal in goals:
print(goal)
for action in actions:
print()
print("Action")
action.dump()
for axiom in axioms:
print()
print("Axiom")
axiom.dump()
print()
print("Axiom layers")
for atom, layer in axiom_layer_dict.items():
print("%s: layer %d" % (atom, layer))
sys.stdout = old_stdout
def translate_task(strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
init, goals,
actions, axioms, metric, implied_facts):
with timers.timing("Processing axioms", block=True):
axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(
actions, axioms, goals)
init = init + axiom_init
#axioms.sort(key=lambda axiom: axiom.name)
#for axiom in axioms:
# axiom.dump()
if DUMP_TASK:
# Remove init facts that don't occur in strips_to_sas: they are constant.
nonconstant_init = filter(strips_to_sas.get, init)
dump_task(nonconstant_init, goals, actions, axioms, axiom_layer_dict)
init_values = [rang - 1 for rang in ranges]
# Closed World Assumption: Initialize to "range - 1" == Nothing.
for fact in init:
pairs = strips_to_sas.get(fact, []) # empty for static init facts
for var, val in pairs:
curr_val = init_values[var]
if curr_val != ranges[var] - 1 and curr_val != val:
assert False, "Inconsistent init facts! [fact = %s]" % fact
init_values[var] = val
init = sas_tasks.SASInit(init_values)
goal_dict_list = translate_strips_conditions(goals, strips_to_sas, ranges, mutex_dict, mutex_ranges)
assert len(goal_dict_list) == 1, "Negative goal not supported"
    ## We could substitute the negative goal literal in
    ## normalize.substitute_complicated_goal using an axiom. We currently
    ## don't do this because the assertion is not triggered as long as the
    ## negative goal belongs to a finite-domain variable with only two
    ## values, which is the case most of the time; we thus avoid
    ## introducing axioms (which are not supported by all heuristics).
goal_pairs = list(goal_dict_list[0].items())
goal = sas_tasks.SASGoal(goal_pairs)
operators = translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict, mutex_ranges, implied_facts)
axioms = translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict, mutex_ranges)
axiom_layers = [-1] * len(ranges)
for atom, layer in axiom_layer_dict.items():
assert layer >= 0
[(var, val)] = strips_to_sas[atom]
axiom_layers[var] = layer
variables = sas_tasks.SASVariables(ranges, axiom_layers, translation_key)
mutexes = [sas_tasks.SASMutexGroup(group) for group in mutex_key]
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def unsolvable_sas_task(msg):
print("%s! Generating unsolvable task..." % msg)
variables = sas_tasks.SASVariables(
[2], [-1], [["Atom dummy(val1)", "Atom dummy(val2)"]])
# We create no mutexes: the only possible mutex is between
# dummy(val1) and dummy(val2), but the preprocessor would filter
# it out anyway since it is trivial (only involves one
# finite-domain variable).
mutexes = []
init = sas_tasks.SASInit([0])
goal = sas_tasks.SASGoal([(0, 1)])
operators = []
axioms = []
metric = True
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def pddl_to_sas(task):
with timers.timing("Instantiating", block=True):
(relaxed_reachable, atoms, actions, axioms,
reachable_action_params) = instantiate.explore(task)
if not relaxed_reachable:
return unsolvable_sas_task("No relaxed solution")
# HACK! Goals should be treated differently.
if isinstance(task.goal, pddl.Conjunction):
goal_list = task.goal.parts
else:
goal_list = [task.goal]
for item in goal_list:
assert isinstance(item, pddl.Literal)
with timers.timing("Computing fact groups", block=True):
groups, mutex_groups, translation_key = fact_groups.compute_groups(
task, atoms, reachable_action_params,
partial_encoding=USE_PARTIAL_ENCODING)
with timers.timing("Building STRIPS to SAS dictionary"):
ranges, strips_to_sas = strips_to_sas_dictionary(
groups, assert_partial=USE_PARTIAL_ENCODING)
with timers.timing("Building dictionary for full mutex groups"):
mutex_ranges, mutex_dict = strips_to_sas_dictionary(
mutex_groups, assert_partial=False)
if ADD_IMPLIED_PRECONDITIONS:
with timers.timing("Building implied facts dictionary..."):
implied_facts = build_implied_facts(strips_to_sas, groups, mutex_groups)
else:
implied_facts = {}
with timers.timing("Building mutex information", block=True):
mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
with timers.timing("Translating task", block=True):
sas_task = translate_task(
strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
task.init, goal_list, actions, axioms, task.use_min_cost_metric,
implied_facts)
print("%d implied effects removed" % removed_implied_effect_counter)
print("%d effect conditions simplified" % simplified_effect_condition_counter)
print("%d implied preconditions added" % added_implied_precondition_counter)
if DETECT_UNREACHABLE:
with timers.timing("Detecting unreachable propositions", block=True):
try:
simplify.filter_unreachable_propositions(sas_task)
except simplify.Impossible:
return unsolvable_sas_task("Simplified to trivially false goal")
return sas_task
def build_mutex_key(strips_to_sas, groups):
group_keys = []
for group in groups:
group_key = []
for fact in group:
if strips_to_sas.get(fact):
for var, val in strips_to_sas[fact]:
group_key.append((var, val))
else:
print("not in strips_to_sas, left out:", fact)
group_keys.append(group_key)
return group_keys
def build_implied_facts(strips_to_sas, groups, mutex_groups):
## Compute a dictionary mapping facts (FDR pairs) to lists of FDR
## pairs implied by that fact. In other words, in all states
## containing p, all pairs in implied_facts[p] must also be true.
##
## There are two simple cases where a pair p implies a pair q != p
## in our FDR encodings:
## 1. p and q encode the same fact
## 2. p encodes a STRIPS proposition X, q encodes a STRIPS literal
## "not Y", and X and Y are mutex.
##
## The first case cannot arise when we use partial encodings, and
## when we use full encodings, I don't think it would give us any
## additional information to exploit in the operator translation,
## so we only use the second case.
##
## Note that for a pair q to encode a fact "not Y", Y must form a
## fact group of size 1. We call such propositions Y "lonely".
## In the first step, we compute a dictionary mapping each lonely
## proposition to its variable number.
lonely_propositions = {}
for var_no, group in enumerate(groups):
if len(group) == 1:
lonely_prop = group[0]
assert strips_to_sas[lonely_prop] == [(var_no, 0)]
lonely_propositions[lonely_prop] = var_no
    ## Then we compute implied facts as follows: for each mutex group and
    ## each proposition prop in it, check whether prop is lonely (if and
    ## only if it is, "not prop" has a representation as an FDR pair). In
    ## that case, all other facts in this mutex group imply "not prop".
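    ## Illustrative example (hypothetical numbering): if the lonely
    ## proposition P is encoded by variable 4 (so "not P" is the FDR pair
    ## (4, 1)) and P is mutex with Q, then every FDR pair encoding Q gets
    ## (4, 1) appended to its implied_facts entry.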
implied_facts = defaultdict(list)
for mutex_group in mutex_groups:
for prop in mutex_group:
prop_var = lonely_propositions.get(prop)
if prop_var is not None:
prop_is_false = (prop_var, 1)
for other_prop in mutex_group:
if other_prop is not prop:
for other_fact in strips_to_sas[other_prop]:
implied_facts[other_fact].append(prop_is_false)
return implied_facts
def dump_statistics(sas_task):
print("Translator variables: %d" % len(sas_task.variables.ranges))
print(("Translator derived variables: %d" %
len([layer for layer in sas_task.variables.axiom_layers if layer >= 0])))
print("Translator facts: %d" % sum(sas_task.variables.ranges))
print("Translator mutex groups: %d" % len(sas_task.mutexes))
print(("Translator total mutex groups size: %d" %
sum(mutex.get_encoding_size() for mutex in sas_task.mutexes)))
print("Translator operators: %d" % len(sas_task.operators))
print("Translator task size: %d" % sas_task.get_encoding_size())
try:
peak_memory = tools.get_peak_memory_in_kb()
except Warning as warning:
print(warning)
else:
print("Translator peak memory: %d KB" % peak_memory)
def check_python_version(force_old_python):
if sys.version_info[:2] == (2, 6):
if force_old_python:
print("Warning: Running with slow Python 2.6", file=sys.stderr)
else:
print("Error: Python 2.6 runs the translator very slowly. You should "
"use Python 2.7 or 3.x instead. If you really need to run it "
"with Python 2.6, you can pass the --force-old-python flag.",
file=sys.stderr)
sys.exit(1)
def parse_options():
optparser = optparse.OptionParser(usage="Usage: %prog [options] [<domain.pddl>] <task.pddl>")
optparser.add_option(
"--relaxed", dest="generate_relaxed_task", action="store_true",
help="Output relaxed task (no delete effects)")
optparser.add_option(
"--force-old-python", action="store_true",
help="Allow running the translator with slow Python 2.6")
options, args = optparser.parse_args()
# Remove the parsed options from sys.argv
sys.argv = [sys.argv[0]] + args
return options, args
def main():
options, args = parse_options()
check_python_version(options.force_old_python)
timer = timers.Timer()
with timers.timing("Parsing", True):
task = pddl.open()
with timers.timing("Normalizing task"):
normalize.normalize(task)
if options.generate_relaxed_task:
# Remove delete effects.
for action in task.actions:
for index, effect in reversed(list(enumerate(action.effects))):
if effect.literal.negated:
del action.effects[index]
sas_task = pddl_to_sas(task)
dump_statistics(sas_task)
with timers.timing("Writing output"):
with open("output.sas", "w") as output_file:
sas_task.output(output_file)
print("Done! %s" % timer)
if __name__ == "__main__":
main()
| fawcettc/planning-features | fast-downward/translate/translate.py | Python | agpl-3.0 | 28,962 |
import fnmatch
import os
from pathlib import Path
from subprocess import PIPE, Popen
from django.apps import apps as installed_apps
from django.utils.crypto import get_random_string
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from .base import CommandError, CommandParser
def popen_wrapper(args, stdout_encoding='utf-8'):
"""
Friendly wrapper around Popen.
Return stdout output, stderr output, and OS status code.
"""
try:
p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')
except OSError as err:
raise CommandError('Error executing %s' % args[0]) from err
output, errors = p.communicate()
return (
output.decode(stdout_encoding),
errors.decode(DEFAULT_LOCALE_ENCODING, errors='replace'),
p.returncode
)
def handle_extensions(extensions):
"""
Organize multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
{'.html', '.js', '.py'}
>>> handle_extensions(['.html, txt,.tpl'])
{'.html', '.tpl', '.txt'}
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def find_command(cmd, path=None, pathext=None):
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, str):
path = [path]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
def get_random_secret_key():
"""
Return a 50 character random string usable as a SECRET_KEY setting value.
"""
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
def parse_apps_and_model_labels(labels):
"""
Parse a list of "app_label.ModelName" or "app_label" strings into actual
objects and return a two-element tuple:
(set of model classes, set of app_configs).
Raise a CommandError if some specified models or apps don't exist.
"""
apps = set()
models = set()
for label in labels:
if '.' in label:
try:
model = installed_apps.get_model(label)
except LookupError:
raise CommandError('Unknown model: %s' % label)
models.add(model)
else:
try:
app_config = installed_apps.get_app_config(label)
except LookupError as e:
raise CommandError(str(e))
apps.add(app_config)
return models, apps
def get_command_line_option(argv, option):
"""
Return the value of a command line option (which should include leading
    dashes, e.g. '--testrunner') from an argument list. Return None if the
option wasn't passed or if the argument list couldn't be parsed.
"""
parser = CommandParser(add_help=False, allow_abbrev=False)
parser.add_argument(option, dest='value')
try:
options, _ = parser.parse_known_args(argv[2:])
except CommandError:
return None
else:
return options.value
def normalize_path_patterns(patterns):
"""Normalize an iterable of glob style patterns based on OS."""
patterns = [os.path.normcase(p) for p in patterns]
dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
norm_patterns = []
for pattern in patterns:
for dir_suffix in dir_suffixes:
if pattern.endswith(dir_suffix):
norm_patterns.append(pattern[:-len(dir_suffix)])
break
else:
norm_patterns.append(pattern)
return norm_patterns
def is_ignored_path(path, ignore_patterns):
"""
Check if the given path should be ignored or not based on matching
one of the glob style `ignore_patterns`.
"""
path = Path(path)
def ignore(pattern):
return fnmatch.fnmatchcase(path.name, pattern) or fnmatch.fnmatchcase(str(path), pattern)
return any(ignore(pattern) for pattern in normalize_path_patterns(ignore_patterns))
| fenginx/django | django/core/management/utils.py | Python | bsd-3-clause | 4,924 |
import sys
import os
import subprocess
import time
def get_slaves():
slaves = subprocess.check_output(['cat', '/root/spark-ec2/slaves'])
return slaves.strip().split('\n')
slaves = get_slaves()
for slave in slaves:
subprocess.call(['ssh', slave, 'killall', 'iperf3'])
subprocess.call(['scp', '/root/spark-ec2/test_speed_slave.py', slave+':/root/'])
iperf_master = subprocess.Popen(['iperf3', '-s', '-p', '6789'])
iperf_slaves = []
for slave in slaves:
iperf_slaves.append(subprocess.check_output(['ssh', slave, 'python', 'test_speed_slave.py']))
time.sleep(1)
print "terminating master"
iperf_master.terminate()
subprocess.call(['killall', 'iperf3'])
time.sleep(1)
print "checking slaves speed"
for iperf_slave in iperf_slaves:
print iperf_slave.strip()
| Gaojiaqi/spark-ec2 | test_speed_master.py | Python | apache-2.0 | 789 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import importlib
from warnings import warn
from pyspd.locator import LocatorInterface
class LocatorModule(LocatorInterface):
"""Locates plugins given as pure python module name (foo.bar.baz).
Locator expects that PYTHONPATH is set correctly
"""
def __init__(self, *modules):
"""
Object initialization
Arguments:
:param *modules: list of modules to be loaded
:type *modules: list
"""
warn("pyspd.locator.module.LocatorModule is deprecated." +
"Use pyspd.loader.LoaderModule instead")
self._modules = list(modules)
def __call__(self):
"""
Loads plugins from given modules
"""
warn("pyspd.locator.module.LocatorModule is deprecated." +
"Use pyspd.loader.LoaderModule instead")
for module in self._modules:
importlib.import_module(module)
| michalbachowski/pyspd | src/pyspd/locator/module.py | Python | mit | 964 |
from functools import reduce

def factors(n):
    # Return the set of all positive divisors of n.
    return set(reduce(list.__add__,
                ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
| jarble/EngScript | libraries/factors.py | Python | mit | 136 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.lldxf.attributes import DXFAttr, RETURN_DEFAULT
def test_return_default():
attr = DXFAttr(
code=62,
default=12,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr.fixer(7) == 12
attr2 = DXFAttr(
code=63,
default=13,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr2.fixer(7) == 13
if __name__ == "__main__":
pytest.main([__file__])
| mozman/ezdxf | tests/test_00_dxf_low_level_structs/test_054_dxfattr.py | Python | mit | 541 |
"""SCons.Tool.dvi
Common DVI Builder definition for various other Tool modules that use it.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvi.py 2013/03/03 09:48:35 garyo"
import SCons.Builder
import SCons.Tool
DVIBuilder = None
def generate(env):
try:
env['BUILDERS']['DVI']
except KeyError:
global DVIBuilder
if DVIBuilder is None:
# The suffix is hard-coded to '.dvi', not configurable via a
# construction variable like $DVISUFFIX, because the output
# file name is hard-coded within TeX.
DVIBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.LaTeXScanner,
suffix = '.dvi',
emitter = {},
source_ext_match = None)
env['BUILDERS']['DVI'] = DVIBuilder
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| aubreyrjones/libesp | scons_local/scons-local-2.3.0/SCons/Tool/dvi.py | Python | mit | 2,400 |
import httplib
import urllib
import xml.dom.minidom
from utils import parse_xml
from paython.gateways.core import Gateway
from paython.exceptions import RequestError, GatewayError, DataValidationError
class XMLGateway(Gateway):
def __init__(self, host, translations, debug=False, special_params={}):
""" initalize API call session
host: hostname (apigateway.tld)
auth: accept a tuple with (username,password)
debug: True/False
"""
self.doc = xml.dom.minidom.Document()
self.api_host = host
self.debug = debug
self.parse_xml = parse_xml
self.special_ssl = special_params
super(XMLGateway, self).__init__(set_method=self.set, translations=translations, debug=debug)
def set(self, path, child=False, attribute=False):
""" Accepts a forward slash seperated path of XML elements to traverse and create if non existent.
Optional child and target node attributes can be set. If the `child` attribute is a tuple
it will create X child nodes by reading each tuple as (name, text, 'attribute:value') where value
and attributes are optional for each tuple.
        - path: forward slash separated API element path as string (example: "Order/Authentication/Username")
- child: tuple of child node data or string to create a text node
- attribute: sets the target XML attributes (string format: "Key:Value")
"""
try:
xml_path = path.split('/')
except AttributeError:
return # because if it's None, then don't worry
xml_doc = self.doc
# traverse full XML element path string `path`
for element_name in xml_path:
# get existing XML element by `element_name`
element = self.doc.getElementsByTagName(element_name)
if element: element = element[0]
# create element if non existing or target element
if not element or element_name == xml_path[-1:][0]:
element = self.doc.createElement(element_name)
xml_doc.appendChild(element)
xml_doc = element
if child:
# create child elements from an tuple with optional text node or attributes
# format: ((name1, text, 'attribute:value'), (name2, text2))
if isinstance(child, tuple):
for obj in child:
child = self.doc.createElement(obj[0])
if len(obj) >= 2:
element = self.doc.createTextNode(str(obj[1]))
child.appendChild(element)
if len(obj) == 3:
a = obj[2].split(':')
child.setAttribute(a[0], a[1])
xml_doc.appendChild(child)
# create a single text child node
else:
element = self.doc.createTextNode(str(child))
xml_doc.appendChild(element)
# target element attributes
if attribute:
#checking to see if we have a list of attributes
if '|' in attribute:
attributes = attribute.split('|')
else:
#if not just put this into a list so we have the same data type no matter what
attributes = [attribute]
# adding attributes for each item
for attribute in attributes:
attribute = attribute.split(':')
xml_doc.setAttribute(attribute[0], attribute[1])
def request_xml(self):
"""
Stringifies request xml for debugging
"""
return self.doc.toprettyxml()
def make_request(self, api_uri):
"""
        Submits the API request as an XML-formatted string via HTTP POST and parses the gateway response.
This needs to be run after adding some data via 'set'
"""
request_body = self.doc.toxml('utf-8')
# checking to see if we have any special params
if self.special_ssl:
kwargs = self.special_ssl
api = httplib.HTTPSConnection(self.api_host, **kwargs)
else:
api = httplib.HTTPSConnection(self.api_host)
api.connect()
api.putrequest('POST', api_uri, skip_host=True)
api.putheader('Host', self.api_host)
api.putheader('Content-type', 'text/xml; charset="utf-8"')
api.putheader("Content-length", str(len(request_body)))
api.putheader('User-Agent', 'yourdomain.net')
api.endheaders()
api.send(request_body)
resp = api.getresponse()
resp_data = resp.read()
# parse API call response
if not resp.status == 200:
raise RequestError('Gateway returned %i status' % resp.status)
# parse XML response and return as dict
try:
resp_dict = self.parse_xml(resp_data)
except:
try:
resp_dict = self.parse_xml('<?xml version="1.0"?><response>%s</response>' % resp_data)
except:
raise RequestError('Could not parse XML into JSON')
return resp_dict
class SOAPGateway(object):
pass
class GetGateway(Gateway):
REQUEST_DICT = {}
debug = False
def __init__(self, translations, debug):
"""core GETgateway class"""
super(GetGateway, self).__init__(set_method=self.set, translations=translations, debug=debug)
self.debug = debug
def set(self, key, value):
"""
        Sets up the request dict for a GET request
"""
self.REQUEST_DICT[key] = value
def unset(self, key):
"""
        Removes a key from the GET request dict
"""
try:
del self.REQUEST_DICT[key]
except KeyError:
raise DataValidationError('The key being unset is non-existent in the request dictionary.')
def query_string(self):
"""
Build the query string to use later (in get)
"""
request_query = '?%s' % urllib.urlencode(self.REQUEST_DICT)
return request_query
def make_request(self, uri):
"""
GETs url with params - simple enough... string uri, string params
"""
try:
params = self.query_string()
request = urllib.urlopen('%s%s' % (uri, params))
return request.read()
except:
raise GatewayError('Error making request to gateway')
class PostGateway(Gateway):
REQUEST_DICT = {}
debug = False
def __init__(self, translations, debug):
"""core POSTgateway class"""
super(PostGateway, self).__init__(set_method=self.set, translations=translations, debug=debug)
self.debug = debug
def set(self, key, value):
"""
        Sets up the request dict for a POST request
"""
self.REQUEST_DICT[key] = value
def params(self):
"""
returns arguments that are going to be sent to the POST (here for debugging)
"""
return urllib.urlencode(self.REQUEST_DICT)
def make_request(self, uri):
"""
POSTs to url with params (self.REQUEST_DICT) - simple enough... string uri, dict params
"""
try:
request = urllib.urlopen(uri, self.params())
return request.read()
except:
raise GatewayError('Error making request to gateway')
| vauxoo-dev/Paython | paython/lib/api.py | Python | mit | 7,371 |
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os.path
import re
from atomic_reactor.constants import (
PLUGIN_RESOLVE_REMOTE_SOURCE, REMOTE_SOURCE_DIR, CACHITO_ENV_ARG_ALIAS, CACHITO_ENV_FILENAME)
from atomic_reactor.utils.koji import get_koji_task_owner
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.plugins.build_orchestrate_build import override_build_kwarg
from atomic_reactor.plugins.pre_reactor_config import (
get_cachito, get_cachito_session, get_koji_session)
from atomic_reactor.util import get_build_json, is_scratch_build
class ResolveRemoteSourcePlugin(PreBuildPlugin):
"""Initiate a new Cachito request for sources
This plugin will read the remote_sources configuration from
container.yaml in the git repository, use it to make a request
to Cachito, and wait for the request to complete.
"""
key = PLUGIN_RESOLVE_REMOTE_SOURCE
is_allowed_to_fail = False
def __init__(self, tasker, workflow, dependency_replacements=None):
"""
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param dependency_replacements: list<str>, dependencies for the cachito fetched artifact to
be replaced. Must be of the form pkg_manager:name:version[:new_name]
"""
super(ResolveRemoteSourcePlugin, self).__init__(tasker, workflow)
self._cachito_session = None
self._osbs = None
self._dependency_replacements = self.parse_dependency_replacements(dependency_replacements)
def parse_dependency_replacements(self, replacement_strings):
"""Parse dependency_replacements param and return cachito-reaady dependency replacement dict
param replacement_strings: list<str>, pkg_manager:name:version[:new_name]
return: list<dict>, cachito formated dependency replacements param
"""
if not replacement_strings:
return
dependency_replacements = []
for dr_str in replacement_strings:
pkg_manager, name, version, new_name = (dr_str.split(':', 3) + [None] * 4)[:4]
if None in [pkg_manager, name, version]:
raise ValueError('Cachito dependency replacements must be '
'"pkg_manager:name:version[:new_name]". got {}'.format(dr_str))
dr = {'type': pkg_manager, 'name': name, 'version': version}
if new_name:
dr['new_name'] = new_name
dependency_replacements.append(dr)
return dependency_replacements
def run(self):
try:
get_cachito(self.workflow)
except KeyError:
self.log.info('Aborting plugin execution: missing Cachito configuration')
return
remote_source_params = self.workflow.source.config.remote_source
if not remote_source_params:
self.log.info('Aborting plugin execution: missing remote_source configuration')
return
if self._dependency_replacements and not is_scratch_build(self.workflow):
raise ValueError('Cachito dependency replacements are only allowed for scratch builds')
user = self.get_koji_user()
self.log.info('Using user "%s" for cachito request', user)
source_request = self.cachito_session.request_sources(
user=user,
dependency_replacements=self._dependency_replacements,
**remote_source_params
)
source_request = self.cachito_session.wait_for_request(source_request)
remote_source_json = self.source_request_to_json(source_request)
remote_source_url = self.cachito_session.assemble_download_url(source_request)
remote_source_conf_url = remote_source_json.get('configuration_files')
remote_source_icm_url = remote_source_json.get('content_manifest')
self.set_worker_params(source_request, remote_source_url, remote_source_conf_url,
remote_source_icm_url)
dest_dir = self.workflow.source.workdir
dest_path = self.cachito_session.download_sources(source_request, dest_dir=dest_dir)
return {
# Annotations to be added to the current Build object
'annotations': {'remote_source_url': remote_source_url},
# JSON representation of the remote source request
'remote_source_json': remote_source_json,
# Local path to the remote source archive
'remote_source_path': dest_path,
}
def set_worker_params(self, source_request, remote_source_url, remote_source_conf_url,
remote_source_icm_url):
build_args = {}
# This matches values such as 'deps/gomod' but not 'true'
rel_path_regex = re.compile(r'^[^/]+/[^/]+(?:/[^/]+)*$')
for env_var, value in source_request.get('environment_variables', {}).items():
# Turn the environment variables that are relative paths into absolute paths that
# represent where the remote sources are copied to during the build process.
if re.match(rel_path_regex, value):
abs_path = os.path.join(REMOTE_SOURCE_DIR, value)
self.log.debug(
'Setting the Cachito environment variable "%s" to the absolute path "%s"',
env_var,
abs_path,
)
build_args[env_var] = abs_path
else:
build_args[env_var] = value
# Alias for absolute path to cachito.env script added into buildargs
build_args[CACHITO_ENV_ARG_ALIAS] = os.path.join(REMOTE_SOURCE_DIR, CACHITO_ENV_FILENAME)
override_build_kwarg(self.workflow, 'remote_source_url', remote_source_url)
override_build_kwarg(self.workflow, 'remote_source_build_args', build_args)
override_build_kwarg(self.workflow, 'remote_source_configs', remote_source_conf_url)
override_build_kwarg(self.workflow, 'remote_source_icm_url', remote_source_icm_url)
def source_request_to_json(self, source_request):
"""Create a relevant representation of the source request"""
required = ('packages', 'ref', 'repo')
optional = ('dependencies', 'flags', 'pkg_managers', 'environment_variables',
'configuration_files', 'content_manifest')
data = {}
try:
data.update({k: source_request[k] for k in required})
except KeyError as exc:
msg = 'Received invalid source request from Cachito: {}'.format(source_request)
self.log.exception(msg)
raise ValueError(msg) from exc
data.update({k: source_request.get(k, []) for k in optional})
return data
def get_koji_user(self):
unknown_user = get_cachito(self.workflow).get('unknown_user', 'unknown_user')
try:
metadata = get_build_json()['metadata']
except KeyError:
msg = 'Unable to get koji user: No build metadata'
self.log.warning(msg)
return unknown_user
try:
koji_task_id = int(metadata.get('labels').get('koji-task-id'))
except (ValueError, TypeError, AttributeError):
msg = 'Unable to get koji user: Invalid Koji task ID'
self.log.warning(msg)
return unknown_user
koji_session = get_koji_session(self.workflow)
return get_koji_task_owner(koji_session, koji_task_id).get('name', unknown_user)
@property
def cachito_session(self):
if not self._cachito_session:
self._cachito_session = get_cachito_session(self.workflow)
return self._cachito_session
| DBuildService/atomic-reactor | atomic_reactor/plugins/pre_resolve_remote_source.py | Python | bsd-3-clause | 7,859 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents an invite link for a chat."""
import datetime
from typing import TYPE_CHECKING, Any, Optional
from telegram import TelegramObject, User
from telegram.utils.helpers import from_timestamp, to_timestamp
from telegram.utils.types import JSONDict
if TYPE_CHECKING:
from telegram import Bot
class ChatInviteLink(TelegramObject):
"""This object represents an invite link for a chat.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`invite_link`, :attr:`creator`, :attr:`is_primary` and
:attr:`is_revoked` are equal.
.. versionadded:: 13.4
Args:
invite_link (:obj:`str`): The invite link.
creator (:class:`telegram.User`): Creator of the link.
is_primary (:obj:`bool`): :obj:`True`, if the link is primary.
is_revoked (:obj:`bool`): :obj:`True`, if the link is revoked.
expire_date (:class:`datetime.datetime`, optional): Date when the link will expire or
has been expired.
member_limit (:obj:`int`, optional): Maximum number of users that can be members of the
chat simultaneously after joining the chat via this invite link; 1-99999.
Attributes:
invite_link (:obj:`str`): The invite link. If the link was created by another chat
administrator, then the second part of the link will be replaced with ``'…'``.
creator (:class:`telegram.User`): Creator of the link.
is_primary (:obj:`bool`): :obj:`True`, if the link is primary.
is_revoked (:obj:`bool`): :obj:`True`, if the link is revoked.
expire_date (:class:`datetime.datetime`): Optional. Date when the link will expire or
has been expired.
member_limit (:obj:`int`): Optional. Maximum number of users that can be members
of the chat simultaneously after joining the chat via this invite link; 1-99999.
"""
__slots__ = (
'invite_link',
'creator',
'is_primary',
'is_revoked',
'expire_date',
'member_limit',
'_id_attrs',
)
def __init__(
self,
invite_link: str,
creator: User,
is_primary: bool,
is_revoked: bool,
expire_date: datetime.datetime = None,
member_limit: int = None,
**_kwargs: Any,
):
# Required
self.invite_link = invite_link
self.creator = creator
self.is_primary = is_primary
self.is_revoked = is_revoked
# Optionals
self.expire_date = expire_date
self.member_limit = int(member_limit) if member_limit is not None else None
self._id_attrs = (self.invite_link, self.creator, self.is_primary, self.is_revoked)
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['ChatInviteLink']:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data['creator'] = User.de_json(data.get('creator'), bot)
data['expire_date'] = from_timestamp(data.get('expire_date', None))
return cls(**data)
def to_dict(self) -> JSONDict:
"""See :meth:`telegram.TelegramObject.to_dict`."""
data = super().to_dict()
data['expire_date'] = to_timestamp(self.expire_date)
return data
| leandrotoledo/python-telegram-bot | telegram/chatinvitelink.py | Python | lgpl-3.0 | 4,245 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import itertools
import re
from copy import copy
from powerline.lib.unicode import unicode
from powerline.lint.markedjson.error import echoerr, DelayedEchoErr, NON_PRINTABLE_STR
from powerline.lint.selfcheck import havemarks
NON_PRINTABLE_RE = re.compile(
NON_PRINTABLE_STR.translate({
ord('\t'): None,
ord('\n'): None,
0x0085: None,
})
)
class Spec(object):
'''Class that describes some JSON value
In powerline it is only used to describe JSON values stored in powerline
configuration.
:param dict keys:
Dictionary that maps keys that may be present in the given JSON
dictionary to their descriptions. If this parameter is not empty it
implies that described value has dictionary type. Non-dictionary types
must be described using ``Spec()``: without arguments.
.. note::
Methods that create the specifications return ``self``, so calls to them
may be chained: ``Spec().type(unicode).re('^\w+$')``. This does not
apply to functions that *apply* specification like :py:meth`Spec.match`.
.. note::
Methods starting with ``check_`` return two values: first determines
whether caller should proceed on running other checks, second
determines whether there were any problems (i.e. whether error was
reported). One should not call these methods directly: there is
:py:meth:`Spec.match` method for checking values.
.. note::
In ``check_`` and ``match`` methods specifications are identified by
		their indexes for the purpose of simplifying :py:meth:`Spec.copy`
method.
Some common parameters:
``data``:
Whatever data supplied by the first caller for checker functions. Is not
processed by :py:class:`Spec` methods in any fashion.
``context``:
:py:class:`powerline.lint.context.Context` instance, describes context
of the value. :py:class:`Spec` methods only use its ``.key`` methods for
error messages.
``echoerr``:
Callable that should be used to echo errors. Is supposed to take four
optional keyword arguments: ``problem``, ``problem_mark``, ``context``,
``context_mark``.
``value``:
Checked value.
'''
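	# Minimal illustrative sketch (hypothetical key, not a real powerline
	# config schema): a dictionary with an optional printable string "name"
	# could be described as
	#   spec = Spec(name=Spec().printable().optional())
	# and later checked with spec.match(...), passing a Context instance
	# from powerline.lint.context.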
def __init__(self, **keys):
self.specs = []
self.keys = {}
self.checks = []
self.cmsg = ''
self.isoptional = False
self.uspecs = []
self.ufailmsg = lambda key: 'found unknown key: {0}'.format(key)
self.did_type = False
self.update(**keys)
def update(self, **keys):
'''Describe additional keys that may be present in given JSON value
If called with some keyword arguments implies that described value is
a dictionary. If called without keyword parameters it is no-op.
:return: self.
'''
for k, v in keys.items():
self.keys[k] = len(self.specs)
self.specs.append(v)
if self.keys and not self.did_type:
self.type(dict)
self.did_type = True
return self
def copy(self, copied=None):
'''Deep copy the spec
:param dict copied:
Internal dictionary used for storing already copied values. This
parameter should not be used.
:return: New :py:class:`Spec` object that is a deep copy of ``self``.
'''
copied = copied or {}
try:
return copied[id(self)]
except KeyError:
instance = self.__class__()
copied[id(self)] = instance
return self.__class__()._update(self.__dict__, copied)
def _update(self, d, copied):
'''Helper for the :py:meth:`Spec.copy` function
Populates new instance with values taken from the old one.
:param dict d:
``__dict__`` of the old instance.
:param dict copied:
Storage for already copied values.
'''
self.__dict__.update(d)
self.keys = copy(self.keys)
self.checks = copy(self.checks)
self.uspecs = copy(self.uspecs)
self.specs = [spec.copy(copied) for spec in self.specs]
return self
def unknown_spec(self, keyfunc, spec):
'''Define specification for non-static keys
This method should be used if key names cannot be determined at runtime
or if a number of keys share identical spec (in order to not repeat it).
		:py:meth:`Spec.match` method processes a dictionary in the following order:
* First it tries to use specifications provided at the initialization or
by the :py:meth:`Spec.update` method.
* If no specification for given key was provided it processes
specifications from ``keyfunc`` argument in order they were supplied.
Once some key matches specification supplied second ``spec`` argument
is used to determine correctness of the value.
:param Spec keyfunc:
:py:class:`Spec` instance or a regular function that returns two
values (the same :py:meth:`Spec.match` returns). This argument is
used to match keys that were not provided at initialization or via
:py:meth:`Spec.update`.
:param Spec spec:
:py:class:`Spec` instance that will be used to check keys matched by
``keyfunc``.
:return: self.
'''
if isinstance(keyfunc, Spec):
self.specs.append(keyfunc)
keyfunc = len(self.specs) - 1
self.specs.append(spec)
self.uspecs.append((keyfunc, len(self.specs) - 1))
return self
def unknown_msg(self, msgfunc):
'''Define message which will be used when unknown key was found
“Unknown” is a key that was not provided at the initialization and via
:py:meth:`Spec.update` and did not match any ``keyfunc`` proided via
:py:meth:`Spec.unknown_spec`.
:param msgfunc:
Function that takes that unknown key as an argument and returns the
message text. Text will appear at the top (start of the sentence).
:return: self.
'''
self.ufailmsg = msgfunc
return self
def context_message(self, msg):
'''Define message that describes context
:param str msg:
Message that describes context. Is written using the
:py:meth:`str.format` syntax and is expected to display keyword
parameter ``key``.
:return: self.
'''
self.cmsg = msg
for spec in self.specs:
if not spec.cmsg:
spec.context_message(msg)
return self
def check_type(self, value, context_mark, data, context, echoerr, types):
'''Check that given value matches given type(s)
:param tuple types:
List of accepted types. Since :py:class:`Spec` is supposed to
describe JSON values only ``dict``, ``list``, ``unicode``, ``bool``,
``float`` and ``NoneType`` types make any sense.
:return: proceed, hadproblem.
'''
havemarks(value)
if type(value.value) not in types:
echoerr(
context=self.cmsg.format(key=context.key),
context_mark=context_mark,
problem='{0!r} must be a {1} instance, not {2}'.format(
value,
', '.join((t.__name__ for t in types)),
type(value.value).__name__
),
problem_mark=value.mark
)
return False, True
return True, False
def check_func(self, value, context_mark, data, context, echoerr, func, msg_func):
'''Check value using given function
:param function func:
Callable that should accept four positional parameters:
#. checked value,
#. ``data`` parameter with arbitrary data (supplied by top-level
caller),
#. current context and
#. function used for echoing errors.
This callable should return three values:
#. determines whether ``check_func`` caller should proceed
calling other checks,
#. determines whether ``check_func`` should echo error on its own
(it should be set to False if ``func`` echoes error itself) and
#. determines whether function has found some errors in the checked
value.
:param function msg_func:
Callable that takes checked value as the only positional parameter
and returns a string that describes the problem. Only useful for
small checker functions since it is ignored when second returned
value is false.
:return: proceed, hadproblem.
'''
havemarks(value)
proceed, echo, hadproblem = func(value, data, context, echoerr)
if echo and hadproblem:
echoerr(context=self.cmsg.format(key=context.key),
context_mark=context_mark,
problem=msg_func(value),
problem_mark=value.mark)
return proceed, hadproblem
def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func):
'''Check that each value in the list matches given specification
:param function item_func:
Callable like ``func`` from :py:meth:`Spec.check_func`. Unlike
``func`` this callable is called for each value in the list and may
be a :py:class:`Spec` object index.
:param func msg_func:
Callable like ``msg_func`` from :py:meth:`Spec.check_func`. Should
accept one problematic item and is not used for :py:class:`Spec`
			object indices in the ``item_func`` method.
:return: proceed, hadproblem.
'''
havemarks(value)
i = 0
hadproblem = False
for item in value:
havemarks(item)
if isinstance(item_func, int):
spec = self.specs[item_func]
proceed, fhadproblem = spec.match(
item,
value.mark,
data,
context.enter_item('list item ' + unicode(i), item),
echoerr
)
else:
proceed, echo, fhadproblem = item_func(item, data, context, echoerr)
if echo and fhadproblem:
echoerr(context=self.cmsg.format(key=context.key + '/list item ' + unicode(i)),
context_mark=value.mark,
problem=msg_func(item),
problem_mark=item.mark)
if fhadproblem:
hadproblem = True
if not proceed:
return proceed, hadproblem
i += 1
return True, hadproblem
def check_either(self, value, context_mark, data, context, echoerr, start, end):
'''Check that given value matches one of the given specifications
:param int start:
First specification index.
:param int end:
Specification index that is greater by 1 then last specification
index.
This method does not give an error if any specification from
``self.specs[start:end]`` is matched by the given value.
'''
havemarks(value)
new_echoerr = DelayedEchoErr(
echoerr,
'One of the either variants failed. Messages from the first variant:',
'messages from the next variant:'
)
hadproblem = False
for spec in self.specs[start:end]:
proceed, hadproblem = spec.match(value, value.mark, data, context, new_echoerr)
new_echoerr.next_variant()
if not proceed:
break
if not hadproblem:
return True, False
new_echoerr.echo_all()
return False, hadproblem
def check_tuple(self, value, context_mark, data, context, echoerr, start, end):
'''Check that given value is a list with items matching specifications
:param int start:
First specification index.
:param int end:
Specification index that is greater by 1 then last specification
index.
This method checks that each item in the value list matches
specification with index ``start + item_number``.
'''
havemarks(value)
hadproblem = False
for (i, item, spec) in zip(itertools.count(), value, self.specs[start:end]):
proceed, ihadproblem = spec.match(
item,
value.mark,
data,
context.enter_item('tuple item ' + unicode(i), item),
echoerr
)
if ihadproblem:
hadproblem = True
if not proceed:
return False, hadproblem
return True, hadproblem
def check_printable(self, value, context_mark, data, context, echoerr, _):
'''Check that given unicode string contains only printable characters
'''
hadproblem = False
for match in NON_PRINTABLE_RE.finditer(value):
hadproblem = True
echoerr(
context=self.cmsg.format(key=context.key),
context_mark=value.mark,
problem='found not printable character U+{0:04x} in a configuration string'.format(
ord(match.group(0))),
problem_mark=value.mark.advance_string(match.start() + 1)
)
return True, hadproblem
def printable(self, *args):
self.type(unicode)
self.checks.append(('check_printable', args))
return self
def type(self, *args):
'''Describe value that has one of the types given in arguments
:param args:
List of accepted types. Since :py:class:`Spec` is supposed to
describe JSON values only ``dict``, ``list``, ``unicode``, ``bool``,
``float`` and ``NoneType`` types make any sense.
:return: self.
'''
self.checks.append(('check_type', args))
return self
cmp_funcs = {
'le': lambda x, y: x <= y,
'lt': lambda x, y: x < y,
'ge': lambda x, y: x >= y,
'gt': lambda x, y: x > y,
'eq': lambda x, y: x == y,
}
cmp_msgs = {
'le': 'lesser or equal to',
'lt': 'lesser then',
'ge': 'greater or equal to',
'gt': 'greater then',
'eq': 'equal to',
}
def len(self, comparison, cint, msg_func=None):
'''Describe value that has given length
:param str comparison:
Type of the comparison. Valid values: ``le``, ``lt``, ``ge``,
``gt``, ``eq``.
:param int cint:
Integer with which length is compared.
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit
something like “length of ['foo', 'bar'] is not greater then 10”.
:return: self.
'''
cmp_func = self.cmp_funcs[comparison]
msg_func = (
msg_func
or (lambda value: 'length of {0!r} is not {1} {2}'.format(
value, self.cmp_msgs[comparison], cint))
)
self.checks.append((
'check_func',
(lambda value, *args: (True, True, not cmp_func(len(value), cint))),
msg_func
))
return self
def cmp(self, comparison, cint, msg_func=None):
'''Describe value that is a number or string that has given property
:param str comparison:
Type of the comparison. Valid values: ``le``, ``lt``, ``ge``,
``gt``, ``eq``. This argument will restrict the number or string to
emit True on the given comparison.
:param cint:
Number or string with which value is compared. Type of this
parameter affects required type of the checked value: ``str`` and
``unicode`` types imply ``unicode`` values, ``float`` type implies
that value can be either ``int`` or ``float``, ``int`` type implies
``int`` value and for any other type the behavior is undefined.
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit
something like “10 is not greater then 10”.
:return: self.
'''
if type(cint) is str:
self.type(unicode)
elif type(cint) is float:
self.type(int, float)
else:
self.type(type(cint))
cmp_func = self.cmp_funcs[comparison]
msg_func = msg_func or (lambda value: '{0} is not {1} {2}'.format(value, self.cmp_msgs[comparison], cint))
self.checks.append((
'check_func',
(lambda value, *args: (True, True, not cmp_func(value.value, cint))),
msg_func
))
return self
def unsigned(self, msg_func=None):
'''Describe unsigned integer value
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value.
:return: self.
'''
self.type(int)
self.checks.append((
'check_func',
(lambda value, *args: (True, True, value < 0)),
(lambda value: '{0} must be greater then zero'.format(value))
))
return self
def list(self, item_func, msg_func=None):
'''Describe list with any number of elements, each matching given spec
:param item_func:
:py:class:`Spec` instance or a callable. Check out
:py:meth:`Spec.check_list` documentation for more details. Note that
in :py:meth:`Spec.check_list` description :py:class:`Spec` instance
is replaced with its index in ``self.specs``.
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit just
“failed check”, which is rather indescriptive.
:return: self.
'''
self.type(list)
if isinstance(item_func, Spec):
self.specs.append(item_func)
item_func = len(self.specs) - 1
self.checks.append(('check_list', item_func, msg_func or (lambda item: 'failed check')))
return self
def tuple(self, *specs):
'''Describe list with the given number of elements, each matching corresponding spec
:param (Spec,) specs:
List of specifications. Last element(s) in this list may be
optional. Each element in this list describes element with the same
index in the checked value. Check out :py:meth:`Spec.check_tuple`
			for more details, but note that there the list of specifications is
			replaced with start and end indices in ``self.specs``.
:return: self.
'''
self.type(list)
max_len = len(specs)
min_len = max_len
for spec in reversed(specs):
if spec.isoptional:
min_len -= 1
else:
break
if max_len == min_len:
self.len('eq', len(specs))
else:
if min_len > 0:
self.len('ge', min_len)
self.len('le', max_len)
start = len(self.specs)
for i, spec in zip(itertools.count(), specs):
self.specs.append(spec)
self.checks.append(('check_tuple', start, len(self.specs)))
return self
def func(self, func, msg_func=None):
'''Describe value that is checked by the given function
Check out :py:meth:`Spec.check_func` documentation for more details.
'''
self.checks.append(('check_func', func, msg_func or (lambda value: 'failed check')))
return self
def re(self, regex, msg_func=None):
'''Describe value that is a string that matches given regular expression
:param str regex:
Regular expression that should be matched by the value.
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit
something like “String "xyz" does not match "[a-f]+"”.
:return: self.
'''
self.type(unicode)
compiled = re.compile(regex)
msg_func = msg_func or (lambda value: 'String "{0}" does not match "{1}"'.format(value, regex))
self.checks.append((
'check_func',
(lambda value, *args: (True, True, not compiled.match(value.value))),
msg_func
))
return self
def ident(self, msg_func=None):
'''Describe value that is an identifier like ``foo:bar`` or ``foo``
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit
something like “String "xyz" is not an … identifier”.
:return: self.
'''
msg_func = (
msg_func
or (lambda value: 'String "{0}" is not an alphanumeric/underscore colon-separated identifier'.format(value))
)
return self.re('^\w+(?::\w+)?$', msg_func)
def oneof(self, collection, msg_func=None):
'''Describe value that is equal to one of the value in the collection
:param set collection:
A collection of possible values.
:param function msg_func:
Function that should accept checked value and return message that
describes the problem with this value. Default value will emit
something like “"xyz" must be one of {'abc', 'def', 'ghi'}”.
:return: self.
'''
msg_func = msg_func or (lambda value: '"{0}" must be one of {1!r}'.format(value, list(collection)))
self.checks.append((
'check_func',
(lambda value, *args: (True, True, value not in collection)),
msg_func
))
return self
def error(self, msg):
'''Describe value that must not be there
Useful for giving more descriptive errors for some specific keys then
just “found unknown key: shutdown_event” or for forbidding certain
values when :py:meth:`Spec.unknown_spec` was used.
:param str msg:
Message given for the offending value. It is formatted using
:py:meth:`str.format` with the only positional parameter which is
the value itself.
:return: self.
'''
self.checks.append((
'check_func',
(lambda *args: (True, True, True)),
(lambda value: msg.format(value))
))
return self
def either(self, *specs):
'''Describes value that matches one of the given specs
Check out :py:meth:`Spec.check_either` method documentation for more
details, but note that there a list of specs was replaced by start and
		end indices in ``self.specs``.
:return: self.
'''
start = len(self.specs)
self.specs.extend(specs)
self.checks.append(('check_either', start, len(self.specs)))
return self
def optional(self):
'''Mark value as optional
Only useful for key specs in :py:meth:`Spec.__init__` and
:py:meth:`Spec.update` and some last supplied to :py:meth:`Spec.tuple`.
:return: self.
'''
self.isoptional = True
return self
def required(self):
'''Mark value as required
Only useful for key specs in :py:meth:`Spec.__init__` and
:py:meth:`Spec.update` and some last supplied to :py:meth:`Spec.tuple`.
.. note::
Value is required by default. This method is only useful for
altering existing specification (or rather its copy).
:return: self.
'''
self.isoptional = False
return self
def match_checks(self, *args):
'''Process checks registered for the given value
		Processes only “top-level” checks: key specifications given at
initialization or via :py:meth:`Spec.unknown_spec` are processed by
:py:meth:`Spec.match`.
:return: proceed, hadproblem.
'''
hadproblem = False
for check in self.checks:
proceed, chadproblem = getattr(self, check[0])(*(args + check[1:]))
if chadproblem:
hadproblem = True
if not proceed:
return False, hadproblem
return True, hadproblem
def match(self, value, context_mark=None, data=None, context=(), echoerr=echoerr):
'''Check that given value matches this specification
:return: proceed, hadproblem.
'''
havemarks(value)
proceed, hadproblem = self.match_checks(value, context_mark, data, context, echoerr)
if proceed:
if self.keys or self.uspecs:
for key, vali in self.keys.items():
valspec = self.specs[vali]
if key in value:
proceed, mhadproblem = valspec.match(
value[key],
value.mark,
data,
context.enter_key(value, key),
echoerr
)
if mhadproblem:
hadproblem = True
if not proceed:
return False, hadproblem
else:
if not valspec.isoptional:
hadproblem = True
echoerr(context=self.cmsg.format(key=context.key),
context_mark=None,
problem='required key is missing: {0}'.format(key),
problem_mark=value.mark)
for key in value.keys():
havemarks(key)
if key not in self.keys:
for keyfunc, vali in self.uspecs:
valspec = self.specs[vali]
if isinstance(keyfunc, int):
spec = self.specs[keyfunc]
proceed, khadproblem = spec.match(key, context_mark, data, context, echoerr)
else:
proceed, khadproblem = keyfunc(key, data, context, echoerr)
if khadproblem:
hadproblem = True
if proceed:
proceed, vhadproblem = valspec.match(
value[key],
value.mark,
data,
context.enter_key(value, key),
echoerr
)
if vhadproblem:
hadproblem = True
break
else:
hadproblem = True
if self.ufailmsg:
echoerr(context=self.cmsg.format(key=context.key),
context_mark=None,
problem=self.ufailmsg(key),
problem_mark=key.mark)
return True, hadproblem
def __getitem__(self, key):
'''Get specification for the given key
'''
return self.specs[self.keys[key]]
def __setitem__(self, key, value):
'''Set specification for the given key
'''
self.update(**{key: value})
| zeroc0d3/docker-lab | vim/rootfs/usr/lib/python2.7/dist-packages/powerline/lint/spec.py | Python | mit | 23,514 |